/*
 * PPC EDAC common defs
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#ifndef ASM_EDAC_H
#define ASM_EDAC_H

#include <linux/types.h>	/* for u32 */

/*
 * ECC atomic, DMA, SMP and interrupt safe scrub function.
 * Implements the per-arch edac_atomic_scrub() that EDAC uses for software
 * ECC scrubbing.  It reads memory and then writes back the original
 * value, allowing the hardware to detect and correct memory errors.
 */
static __inline__ void edac_atomic_scrub(void *va, u32 size)
{
	unsigned int *virt_addr = va;
	unsigned int temp;
	unsigned int i;

	for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
		/* Very carefully read and write to memory atomically
		 * so we are interrupt, DMA and SMP safe.
		 */
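		/*
		 * lwarx loads the word at %1 and places a reservation on
		 * it; stwcx. writes the same value back only if that
		 * reservation is still intact, updating cr0 so that bne-
		 * can retry the sequence if another CPU or DMA agent
		 * touched the location in between.  The trailing isync
		 * keeps later instructions from executing before the
		 * scrub of this word has completed.
		 */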
		__asm__ __volatile__ ("\n\
				1:	lwarx	%0,0,%1\n\
					stwcx.	%0,0,%1\n\
					bne-	1b\n\
					isync"
					: "=&r"(temp)
					: "r"(virt_addr)
					: "cr0", "memory");
	}
}
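
/*
 * Illustrative use (hypothetical caller): scrub a page-sized buffer
 * mapped at 'va':
 *
 *	edac_atomic_scrub(va, PAGE_SIZE);
 *
 * Note that 'size' is walked in sizeof(unsigned int) steps, so callers
 * are expected to pass a word-aligned buffer and a word-multiple size;
 * any trailing bytes beyond the last full word are left unscrubbed.
 */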

#endif