/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2013 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_BARRIER_H
#define _ASM_RISCV_BARRIER_H

#ifndef __ASSEMBLY__

#define nop()		__asm__ __volatile__ ("nop")

#define RISCV_FENCE(p, s) \
	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
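
/*
 * RISCV_FENCE stringizes its predecessor and successor sets into a single
 * "fence" instruction; RISCV_FENCE(rw,rw), for example, expands to
 * __asm__ __volatile__ ("fence rw,rw" : : : "memory").  The "memory"
 * clobber doubles as a compiler barrier, so the compiler cannot reorder
 * memory accesses across the fence either.
 */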

/* These barriers need to enforce ordering on both devices or memory. */
#define mb()		RISCV_FENCE(iorw,iorw)
#define rmb()		RISCV_FENCE(ir,ir)
#define wmb()		RISCV_FENCE(ow,ow)

/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb()	RISCV_FENCE(rw,rw)
#define __smp_rmb()	RISCV_FENCE(r,r)
#define __smp_wmb()	RISCV_FENCE(w,w)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(rw,w);						\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(r,rw);						\
	___p1;								\
})
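
/*
 * A minimal illustration (not part of this header) of how the two fences
 * above pair up through smp_store_release()/smp_load_acquire(), assuming a
 * hypothetical flag/data message-passing pattern between two harts:
 *
 *	// producer				// consumer
 *	WRITE_ONCE(data, 42);			if (smp_load_acquire(&flag))
 *	smp_store_release(&flag, 1);			r = READ_ONCE(data);
 *
 * The "fence rw,w" keeps the data write ordered before the flag write, and
 * the "fence r,rw" keeps the flag read ordered before the data read, so a
 * consumer that observes flag == 1 is guaranteed to observe data == 42.
 */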

/*
 * This is a very specific barrier: it's currently only used in two places in
 * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
 * orderings it guarantees, but the "critical section is RCsc" guarantee
 * mandates a barrier on RISC-V. The sequence looks like:
 *
 *    lr.aq lock
 *    sc    lock <= LOCKED
 *    smp_mb__after_spinlock()
 *    // critical section
 *    lr    lock
 *    sc.rl lock <= UNLOCKED
 *
 * The AQ/RL pair provides an RCpc critical section, but there's not really any
 * way we can take advantage of that here because the ordering is only enforced
 * on that one lock. Thus, we're just doing a full fence.
 *
 * Since we allow writeX() to be called from preemptible regions, we need at
 * least an "o" in the predecessor set to ensure device writes are visible
 * before the task is marked as available for scheduling on a new hart. While
 * I don't see any concrete reason we need a full IO fence, it seems safer to
 * just upgrade this in order to avoid any IO crossing a scheduling boundary.
 * In both instances the scheduler pairs this with an mb(), so nothing is
 * necessary on the new hart.
 */
#define smp_mb__after_spinlock()	RISCV_FENCE(iorw,iorw)
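
/*
 * Sketch of the intended call pattern (the real call sites are in the
 * scheduler; the lock name below is only illustrative):
 *
 *	raw_spin_lock(&lock);
 *	smp_mb__after_spinlock();	// upgrade the acquire to a full fence
 *	// critical section, now RCsc and ordered against prior I/O
 *	raw_spin_unlock(&lock);
 */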

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_BARRIER_H */