// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * HIGHMEM API:
 *
 * kmap() provides sleep semantics, hence its mappings are referred to as
 * "permanent maps". It allows mapping up to LAST_PKMAP pages, using
 * @last_pkmap_nr as the cursor for book-keeping.
 *
 * kmap_atomic() can't sleep (it calls pagefault_disable()), so it provides
 * short-lived "temporary mappings" which historically were implemented as
 * fixmaps (compile-time addresses etc.). Their book-keeping is done per cpu.
 *
 * These two facts combined (preemption disabled and per-cpu allocation)
 * mean the total number of concurrent fixmaps is limited to the max number
 * of such allocations in a single control path. Thus KM_TYPE_NR (another
 * historic relic) is a smallish number which caps the max per-cpu fixmaps.
 *
 * ARC HIGHMEM details:
 *
 * - The kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module)
 *   is now shared between vmalloc and kmap (non-overlapping though).
 *
 * - fixmap and pkmap each use a dedicated page table, hooked up to the swapper
 *   PGD. This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, i.e.
 *   2M of kvaddr space for a typical config (8K page, 11:8:13 traversal split).
 *
 * - fixmap needs only a limited number of mappings anyway, so 2M kvaddr ==
 *   256 PTE slots shared across NR_CPUS is more than sufficient (generic code
 *   defines KM_TYPE_NR as 20).
 *
 * - pkmap, being preemptible, could in theory do with more than 256 concurrent
 *   mappings. However the generic pkmap code, map_new_virtual(), doesn't
 *   traverse the PGD and only works with a single page table,
 *   @pkmap_page_table, hence the limit.
 */
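
/*
 * A minimal usage sketch contrasting the two APIs described above (not part
 * of this file's build; @dst is a hypothetical kernel buffer):
 *
 *	void *vaddr;
 *
 *	vaddr = kmap(page);		// may sleep; mapping persists
 *	memcpy(dst, vaddr, PAGE_SIZE);
 *	kunmap(page);
 *
 *	vaddr = kmap_atomic(page);	// atomic; preemption disabled
 *	memcpy(dst, vaddr, PAGE_SIZE);
 *	kunmap_atomic(vaddr);		// unmap before scheduling again
 */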

extern pte_t *pkmap_page_table;
static pte_t *fixmap_page_table;

void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	int idx, cpu_idx;
	unsigned long vaddr;

	/* Grab the next free slot in this cpu's window of the fixmap */
	cpu_idx = kmap_atomic_idx_push();
	idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
	vaddr = FIXMAP_ADDR(idx);

	set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
		   mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);
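
/*
 * For reference, FIXMAP_ADDR() is a plain linear index into the fixmap
 * window; a minimal sketch of the arch header definition:
 *
 *	#define FIXMAP_ADDR(idx)	(FIXMAP_BASE + ((idx) << PAGE_SHIFT))
 *
 * With idx = cpu_idx + KM_TYPE_NR * cpu, CPU n owns the PTE slots
 * [n * KM_TYPE_NR, (n + 1) * KM_TYPE_NR), so two CPUs never contend
 * for the same fixmap slot.
 */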

void kunmap_atomic_high(void *kv)
{
	unsigned long kvaddr = (unsigned long)kv;

	if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {

		/*
		 * Because preemption is disabled, this vaddr can be associated
		 * with the currently allocated index.
		 * But in case of multiple live kmap_atomic(), it still relies
		 * on callers to unmap in the right (reverse) order.
		 */
		int cpu_idx = kmap_atomic_idx();
		int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();

		WARN_ON(kvaddr != FIXMAP_ADDR(idx));

		pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
		local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}
}
EXPORT_SYMBOL(kunmap_atomic_high);
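
/*
 * The per-cpu index is a stack, so nested mappings must be torn down in
 * reverse (LIFO) order. A minimal sketch of a nested pair:
 *
 *	void *src = kmap_atomic(src_page);
 *	void *dst = kmap_atomic(dst_page);
 *
 *	memcpy(dst, src, PAGE_SIZE);
 *
 *	kunmap_atomic(dst);		// most recent mapping first
 *	kunmap_atomic(src);
 */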

static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
	pmd_t *pmd_k = pmd_off_k(kvaddr);
	pte_t *pte_k;

	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pte_k)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pmd_populate_kernel(&init_mm, pmd_k, pte_k);
	return pte_k;
}
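
/*
 * Worked numbers for the typical config mentioned in the header comment
 * (8K pages, 11:8:13 traversal split): the middle 8 bits give
 * PTRS_PER_PTE = 2^8 = 256 slots per table, and 256 slots * 8K/page = 2M
 * of kvaddr per table, i.e. exactly one PGDIR_SIZE window per call above.
 */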

void __init kmap_init(void)
{
	/* Due to recursive include hell, we can't do this in processor.h */
	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));

	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);

	BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
	fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
}