/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_EDAC_H
#define _ASM_X86_EDAC_H

/* ECC atomic, DMA, SMP and interrupt safe scrub function */

static inline void edac_atomic_scrub(void *va, u32 size)
{
	u32 i, *virt_addr = va;

	/*
	 * Very carefully read and write to memory atomically so we
	 * are interrupt, DMA and SMP safe.
	 */
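	/*
	 * The lock-prefixed "addl $0" below is an atomic
	 * read-modify-write that leaves the data unchanged: the read
	 * lets the hardware deliver a corrected value for a single-bit
	 * ECC error, and the write pushes that corrected value back to
	 * DRAM, scrubbing the error in place. Trailing bytes beyond a
	 * multiple of 4 in @size are left unscrubbed.
	 */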
	for (i = 0; i < size / 4; i++, virt_addr++)
		asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
}
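/*
 * Usage sketch (illustrative, not part of this header): the EDAC core
 * scrubs a page that reported a correctable error along these lines
 * (cf. edac_mc_scrub_block() in drivers/edac/edac_mc.c; the mapping
 * helpers and variable names below are assumptions for the example):
 *
 *	void *va = kmap_local_page(page);
 *
 *	edac_atomic_scrub(va + offset, size);
 *	kunmap_local(va);
 *
 * The address should be 4-byte aligned and the size a multiple of 4,
 * since the loop scrubs one 32-bit word at a time.
 */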

#endif /* _ASM_X86_EDAC_H */