/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <ioremap.h>

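/*
 * IS_LOW512 tests whether a physical address lies in the first 512MB,
 * i.e. the range reachable through the unmapped KSEG1 segment;
 * IS_KSEG1 tests whether a virtual address already lies in KSEG1.
 */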
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

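/*
 * walk_system_ram_range() callback: return 1 as soon as any page in the
 * range is regular, page-allocator-managed RAM (valid and not reserved).
 */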
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                               void *arg)
{
        unsigned long i;

        for (i = 0; i < nr_pages; i++) {
                if (pfn_valid(start_pfn + i) &&
                    !PageReserved(pfn_to_page(start_pfn + i)))
                        return 1;
        }

        return 0;
}

/*
 * ioremap_prot - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 * @prot_val: page protection flags; the _CACHE_MASK bits select the CCA
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
                unsigned long prot_val)
{
        unsigned long flags = prot_val & _CACHE_MASK;
        unsigned long offset, pfn, last_pfn;
        struct vm_struct *area;
        phys_addr_t last_addr;
        unsigned long vaddr;
        void __iomem *cpu_addr;

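        /*
         * Give the platform code a chance to handle the request itself,
         * e.g. via a fixed, pre-configured mapping.
         */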
        cpu_addr = plat_ioremap(phys_addr, size, flags);
        if (cpu_addr)
                return cpu_addr;

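        /* Platform hook that may adjust large (>32-bit) physical addresses. */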
        phys_addr = fixup_bigphys_addr(phys_addr, size);

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Map uncached objects in the low 512mb of address space using KSEG1,
         * otherwise map using page tables.
         */
        if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
            flags == _CACHE_UNCACHED)
                return (void __iomem *) CKSEG1ADDR(phys_addr);

        /*
         * Don't allow anybody to remap RAM that may be allocated by the page
         * allocator, since that could lead to races & data clobbering.
         */
        pfn = PFN_DOWN(phys_addr);
        last_pfn = PFN_DOWN(last_addr);
        if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
                                  __ioremap_check_ram) == 1) {
                WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
                          &phys_addr, &last_addr);
                return NULL;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        vaddr = (unsigned long)area->addr;

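        /*
         * Page table entries must be present, global and read/writable;
         * the caller's cache mode (CCA) is already held in the _CACHE_MASK
         * bits of flags.
         */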
        flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
                               __pgprot(flags))) {
                free_vm_area(area);
                return NULL;
        }

        return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);
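/*
 * Typical use, as a sketch only (the register base, size and offset below
 * are hypothetical):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_prot(0x1fd00000, 0x1000, _CACHE_UNCACHED);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	iounmap(regs);
 */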
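/*
 * iounmap - tear down a mapping created by ioremap_prot()
 *
 * Addresses claimed by plat_iounmap() or returned as direct KSEG1 pointers
 * need no page table work; anything else is released with vunmap().
 */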
void iounmap(const volatile void __iomem *addr)
{
        if (!plat_iounmap(addr) && !IS_KSEG1(addr))
                vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);