/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifdef CONFIG_ISA_ARCV2

/*
 * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
 * due to micro-arch buffering/queuing of loads/stores, cache hit vs. miss ...
 *
 * An explicit barrier is provided by the DMB instruction:
 * - Its operand selects fine-grained load/store/load+store semantics
 * - It ensures that memory operations of the selected type issued before
 *   it complete before any subsequent memory operation of the same type
 * - DMB guarantees SMP as well as local barrier semantics
 *   (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
 *   UP: barrier(), SMP: smp_*mb == *mb)
 * - DSYNC provides DMB semantics plus completion of cache/BPU maintenance
 *   ops, hence is not needed in the general case; it also only provides a
 *   full barrier
 */

#define mb()	asm volatile("dmb 3\n" : : : "memory")
#define rmb()	asm volatile("dmb 1\n" : : : "memory")
#define wmb()	asm volatile("dmb 2\n" : : : "memory")
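
/*
 * Illustrative sketch (hypothetical code, not part of this header): a
 * producer/consumer pairing showing how the fine-grained barriers are
 * meant to be used. wmb() orders the data store before the flag store;
 * rmb() orders the flag load before the data load:
 *
 *	int data, flag;
 *
 *	producer:			consumer:
 *		data = 42;			while (!flag)
 *		wmb();					;
 *		flag = 1;			rmb();
 *						val = data;
 */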

#else

/*
 * ARCompact based cores (ARC700) only have the SYNC instruction, which is
 * super heavyweight as it also flushes the pipeline.
 * There are no real SMP implementations of such cores.
 */

#define mb()	asm volatile("sync\n" : : : "memory")
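
/*
 * Only mb() is defined here; asm-generic/barrier.h (included below) fills
 * in the missing barriers, roughly equivalent to:
 *
 *	#define rmb()	mb()
 *	#define wmb()	mb()
 */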

#endif	/* CONFIG_ISA_ARCV2 */

#include <asm-generic/barrier.h>

#endif	/* __ASM_BARRIER_H */