// SPDX-License-Identifier: GPL-2.0-only
/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-map-ops.h>
#include <asm/cpuinfo.h>
#include <asm/cacheflush.h>

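/*
 * Called by the generic DMA layer when a coherent buffer is set up:
 * flush the data cache over the buffer so no stale or dirty cache
 * lines alias the memory once it is accessed uncached.
 */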
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	phys_addr_t paddr = page_to_phys(page);

	flush_dcache_range(paddr, paddr + size);
}

#ifndef CONFIG_MMU
/*
 * Consistent memory allocators. Used for DMA devices that want to share
 * uncached memory with the processor core. My crufty no-MMU approach is
 * simple. In the HW platform we can optionally mirror the DDR up above the
 * processor cacheable region. So, memory accessed in this mirror region will
 * not be cached. It's allocated from the same pool as normal memory, but the
 * handle we return is shifted up into the uncached region. This will no doubt
 * cause big problems if memory allocated here is not also freed properly. -- JW
 *
 * I have to use dcache values because I can't rely on the RAM size:
 */
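/*
 * A purely illustrative example (made-up addresses): if the cacheable
 * DDR window runs from dcache_base 0x80000000 to dcache_high 0x8fffffff,
 * UNCACHED_SHADOW_MASK below works out to 0x10000000, so a buffer
 * allocated at 0x80001000 is handed back as 0x90001000, i.e. the same
 * memory seen through the uncached mirror.
 */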
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#else
#define UNCACHED_SHADOW_MASK 0
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */

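/*
 * Return an uncached alias for an already-allocated coherent buffer by
 * lifting the pointer into the uncached shadow region; warn if the
 * resulting address still falls inside the cacheable window.
 */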
void *arch_dma_set_uncached(void *ptr, size_t size)
{
	unsigned long addr = (unsigned long)ptr;

	addr |= UNCACHED_SHADOW_MASK;
	if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
	return (void *)addr;
}
#endif /* !CONFIG_MMU */