// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cache.h>

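/*
 * ARCompact has a single fixed hardware-uncached window (all addresses
 * at/above ARC_UNCACHED_ADDR_SPACE), whereas ARCv2 exposes a peripheral
 * aperture [perip_base, perip_end] discovered at boot. Accesses falling
 * in either region bypass the caches in hardware, so no special MMU
 * mapping is needed to get uncached semantics.
 */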
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
	if (is_isa_arcompact()) {
		if (paddr >= ARC_UNCACHED_ADDR_SPACE)
			return true;
	} else if (paddr >= perip_base && paddr <= perip_end) {
		return true;
	}

	return false;
}

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
	phys_addr_t end;

	/* Don't allow wraparound or zero size */
	end = paddr + size - 1;
	if (!size || (end < paddr))
		return NULL;

	/*
	 * If the region is h/w uncached, the MMU mapping can be elided as an
	 * optimization.
	 * The cast to u32 is fine as this region can only be inside 4GB.
	 */
	if (arc_uncached_addr_space(paddr))
		return (void __iomem *)(u32)paddr;

	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);
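
/*
 * Example (sketch, not compiled): the usual driver-side pattern for the
 * ioremap()/iounmap() pair above. EXAMPLE_UART_BASE and EXAMPLE_CTRL_OFF
 * are hypothetical placeholders, not definitions from this tree.
 */
#if 0
#define EXAMPLE_UART_BASE	0xf0000000UL
#define EXAMPLE_CTRL_OFF	0x0

static int example_init(void)
{
	void __iomem *regs;

	regs = ioremap(EXAMPLE_UART_BASE, PAGE_SIZE);
	if (!regs)
		return -ENOMEM;

	/* readl/writel go through the uncached mapping set up above */
	writel(0x1, regs + EXAMPLE_CTRL_OFF);

	iounmap(regs);
	return 0;
}
#endif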

/*
 * ioremap with access flags
 * Cache semantics wise it is the same as ioremap - "forced" uncached.
 * However, unlike vanilla ioremap which bypasses the ARC MMU for addresses
 * in the ARC hardware-uncached region, this one still goes through the MMU,
 * as the caller might need finer-grained access control (R/W/X).
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
			   unsigned long flags)
{
	unsigned int off;
	unsigned long vaddr;
	struct vm_struct *area;
	phys_addr_t end;
	pgprot_t prot = __pgprot(flags);

	/* Don't allow wraparound or zero size */
	end = paddr + size - 1;
	if ((!size) || (end < paddr))
		return NULL;

	/* An early platform driver might end up here */
	if (!slab_is_available())
		return NULL;

	/* force uncached */
	prot = pgprot_noncached(prot);

	/* Mappings have to be page-aligned */
	off = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK_PHYS;
	size = PAGE_ALIGN(end + 1) - paddr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = paddr;
	vaddr = (unsigned long)area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
		vunmap((void __force *)vaddr);
		return NULL;
	}
	return (void __iomem *)(off + (char __iomem *)vaddr);
}
EXPORT_SYMBOL(ioremap_prot);
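
/*
 * Example (sketch, not compiled): calling ioremap_prot() directly with an
 * explicit protection value. pgprot_val(PAGE_KERNEL) is just an
 * illustrative flags value; whatever is passed, the mapping is forced
 * uncached by pgprot_noncached() above. The "example_" name is
 * hypothetical.
 */
#if 0
static void __iomem *example_map_regs(phys_addr_t base, unsigned long len)
{
	return ioremap_prot(base, len, pgprot_val(PAGE_KERNEL));
}
#endif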

void iounmap(const void __iomem *addr)
{
	/* weird double cast to handle phys_addr_t > 32 bits */
	if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
		return;

	vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
}
EXPORT_SYMBOL(iounmap);