// SPDX-License-Identifier: GPL-2.0
/*
 * arch/parisc/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
 * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

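	/*
	 * EISA bus memory on PA-RISC is reached through a window in the
	 * system I/O space; fold bus addresses that fall in the EISA
	 * ranges into that window (F_EXTEND(0xfc000000)) before mapping.
	 */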
#ifdef CONFIG_EISA
	unsigned long end = phys_addr + size - 1;
	/* Support EISA addresses */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
	    (phys_addr >= 0x00500000 && end < 0x03bfffff))
		phys_addr |= F_EXTEND(0xfc000000);
#endif

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using:
	 * only reserved pages, which the page allocator never hands
	 * out, may be remapped.
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++) {
			if (!PageReserved(page))
				return NULL;
		}
	}

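	/*
	 * Device memory must not be cached: _PAGE_NO_CACHE makes the
	 * mapping uncacheable, and the remaining bits describe a
	 * present, writable kernel page.
	 */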
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
			  _PAGE_ACCESSED | _PAGE_NO_CACHE);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
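	/*
	 * For example, with 4k pages, phys_addr 0xf0001234 and size
	 * 0x100 yield offset 0x234, a page-aligned base of 0xf0001000,
	 * and a mapping size of one page (0x1000).
	 */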

	/*
	 * OK, go for it.
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

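	/*
	 * ioremap_page_range() fills in the page tables for the new
	 * area; if that fails, vunmap() releases the partially set up
	 * area again.
	 */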
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(ioremap);

void iounmap(const volatile void __iomem *io_addr)
{
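	/*
	 * ioremap() may return an address offset into the first page,
	 * so mask back down to the page base.  Addresses outside the
	 * vmalloc range never came from ioremap() and are ignored.
	 */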
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	if (is_vmalloc_addr((void *)addr))
		vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);
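
/*
 * Usage sketch (illustrative only; the FOO_* names are hypothetical,
 * not part of this file): a driver maps its MMIO registers, accesses
 * them with the read*()/write*() accessors, then unmaps.
 *
 *	void __iomem *regs = ioremap(FOO_HPA, FOO_REGS_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(FOO_RESET, regs + FOO_CTRL_OFF);
 *	...
 *	iounmap(regs);
 */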