// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
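
/*
 * Illustrative sketch only, not part of this file's code: a driver
 * typically pairs ioremap() with the MMIO accessors.  Here "res",
 * REG_CTRL, REG_STATUS and CTRL_ENABLE are hypothetical names for a
 * device's resource and registers.
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(CTRL_ENABLE, regs + REG_CTRL);
 *	status = readl(regs + REG_STATUS);
 *	iounmap(regs);
 */
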
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

LIST_HEAD(static_vmlist);

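/*
 * Find a boot-time static mapping whose physical range and memory type
 * cover [paddr, paddr + size), so callers can reuse it rather than
 * create a fresh vmalloc-space mapping.
 */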
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

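/*
 * Find the static mapping, if any, that contains the given virtual
 * address.  Used by iounmap() to recognise addresses it must not free.
 */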
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is sorted in ascending vaddr order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

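/*
 * Register a static mapping with the early vmalloc code and insert it
 * into static_vmlist, keeping the list sorted by ascending virtual
 * address so the lookups above can stop early.
 */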
void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

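/*
 * Bring a task's view of the vmalloc area up to date by copying the
 * vmalloc-range PGD entries from init_mm, retrying until the sequence
 * counter is stable (it may be bumped concurrently by the unmap code
 * below).
 */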
void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a 4K guard page, so we need to
 * mask the size back down to 1MB alignment or we will overflow in the
 * loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pmd_t *pmdp = pmd_off_k(addr);

	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

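/*
 * Map the range with 1MB hardware sections.  On classic (non-LPAE) ARM
 * each Linux PMD covers a pair of 1MB section entries, which is why the
 * loops below write pmd[0] and pmd[1] and advance the pointer by two.
 */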
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

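/*
 * Map the range with 16MB supersections.  A supersection descriptor
 * must be replicated into all sixteen consecutive page-table entries it
 * spans, hence the inner loop of eight pmd pairs; bits [23:20] of the
 * descriptor carry the high physical address bits for mappings above
 * 4GB.
 */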
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

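/*
 * Core remap worker: reuse a covering static mapping when one exists,
 * otherwise grab a vmalloc-space area and map it with supersections,
 * sections or individual pages, whichever the alignment allows.
 */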
static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
		    mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	     cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
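
/*
 * The three variants above differ only in the memory type used for the
 * mapping: MT_DEVICE for device registers, MT_DEVICE_CACHED for a
 * cacheable mapping, and MT_DEVICE_WC for write-combining buffers
 * (framebuffers are the typical user of the latter).
 */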

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed, for example, when
 * reprogramming source clocks that would otherwise affect normal
 * memory. Please see CONFIG_GENERIC_ALLOCATOR for allocating
 * external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

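/*
 * Tear down a mapping created by the ioremap family.  Static mappings
 * set up at boot are deliberately left untouched; everything else is
 * returned to vmalloc space (after clearing any section mappings the
 * generic VM code does not know about).
 */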
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

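/*
 * Map a 64K PCI I/O window at the given offset within the fixed
 * PCI_IO_VIRT_BASE region, using the memory type selected above
 * (MT_DEVICE unless a platform overrode it).
 */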
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}