#ifndef _ASM_POWERPC_MEMBARRIER_H
#define _ASM_POWERPC_MEMBARRIER_H

static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
	/*
	 * Only need the full barrier when switching between processes.
	 * Barrier when switching from kernel to userspace is not
	 * required here, given that it is implied by mmdrop(). Barrier
	 * when switching from userspace to kernel is not needed after
	 * store to rq->curr.
	 */
	if (likely(!(atomic_read(&next->membarrier_state) &
		     (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
		      MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
		return;

	/*
	 * The membarrier system call requires a full memory barrier
	 * after storing to rq->curr, before going back to user-space.
	 */
	smp_mb();
}
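
/*
 * For reference, a minimal user-space sketch of the pairing side (not part
 * of this header): the smp_mb() above is what allows an expedited membarrier
 * command issued from another thread to order memory accesses around a
 * concurrent context switch. Error handling is omitted for brevity.
 *
 *	#include <linux/membarrier.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Register the process once, then issue expedited barriers as needed.
 *	syscall(__NR_membarrier, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0);
 *	syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
 */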

#endif /* _ASM_POWERPC_MEMBARRIER_H */