// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC ioremap.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <asm/bug.h>
#include <asm/fixmap.h>
#include <asm/kmap_types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

extern int mem_init_done;

/* Fixmap slots handed out by ioremap() before the VM is up; only used
 * (and only valid) while mem_init_done is zero, hence __initdata.
 */
static unsigned int fixmaps_used __initdata;

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
{
	phys_addr_t p;
	unsigned long v;
	unsigned long offset, last_addr;
	struct vm_struct *area = NULL;

	/* Don't allow wraparound or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = addr & ~PAGE_MASK;
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - p;

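	/*
	 * Worked example of the alignment arithmetic above (hypothetical
	 * request; assumes the 8 KiB pages used on OpenRISC, PAGE_SHIFT == 13):
	 *
	 *	ioremap(0x90000004, 0x20)
	 *	  offset    = 0x90000004 & ~PAGE_MASK = 0x4
	 *	  p         = 0x90000004 &  PAGE_MASK = 0x90000000
	 *	  last_addr = 0x90000023
	 *	  size      = PAGE_ALIGN(0x90000024) - p = 0x2000 (one page)
	 *
	 * so one page is mapped and the caller gets back that mapping
	 * plus the 0x4 offset.
	 */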
	if (likely(mem_init_done)) {
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;
		v = (unsigned long)area->addr;
	} else {
		/* Before the VM is up, carve the mapping out of the
		 * dedicated ioremap fixmap window instead; each page
		 * consumes one fixmap slot and is never given back.
		 */
		if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
			return NULL;
		v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
		fixmaps_used += (size >> PAGE_SHIFT);
	}

	if (ioremap_page_range(v, v + size, p,
			__pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
		if (likely(mem_init_done))
			vfree(area->addr);
		else
			fixmaps_used -= (size >> PAGE_SHIFT);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)v);
}
EXPORT_SYMBOL(ioremap);

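/*
 * Example driver-style usage (a minimal sketch; the device base address
 * and register offsets below are made up, not defined anywhere in this
 * file):
 *
 *	void __iomem *regs = ioremap(0x90000000, 0x100);
 *
 *	if (regs) {
 *		u8 status = readb(regs + 0x04);	// read a device register
 *		writeb(0x01, regs);		// poke the mapped device
 *		iounmap(regs);
 *	}
 */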
void iounmap(volatile void __iomem *addr)
{
	/* If the page is from the fixmap pool then we just clear out
	 * the fixmap mapping.
	 */
	if (unlikely((unsigned long)addr >= FIXADDR_START)) {
		/* This is a bit broken... we don't really know
		 * how big the area is so it's difficult to know
		 * how many fixed pages to invalidate...
		 * just flush tlb and hope for the best...
		 * consider this a FIXME
		 *
		 * Really we should be clearing out one or more page
		 * table entries for these virtual addresses so that
		 * future references cause a page fault... for now, we
		 * rely on two things:
		 *   i) this code never gets called on known boards
		 *  ii) invalid accesses to the freed areas aren't made
		 */
		flush_tlb_all();
		return;
	}

	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);

/*
 * OK, this one's a bit tricky... ioremap can get called before memory is
 * initialized (early serial console does this) and will want to alloc a page
 * for its mapping. No userspace pages will ever get allocated before memory
 * is initialized so this applies only to kernel pages. In the event that
 * this is called before memory is initialized we allocate the page using
 * the memblock infrastructure.
 */
pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;

	if (likely(mem_init_done)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	} else {
		/* Early boot: take the page straight from memblock */
		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
	}

	return pte;
}
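
/*
 * Rough early-boot call chain that reaches the memblock path above
 * (a sketch based on the comment preceding the function, not an
 * exhaustive trace):
 *
 *	early console setup
 *	  -> ioremap()				mem_init_done still zero
 *	       -> ioremap_page_range()
 *	            -> pte_alloc_kernel()
 *	                 -> pte_alloc_one_kernel()
 *	                      -> memblock_alloc()
 */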