^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include <linux/swap.h> /* for totalram_pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) unsigned long vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) int idx, type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) type = kmap_atomic_idx_push();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) idx = type + KM_TYPE_NR*smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) BUG_ON(!pte_none(*(kmap_pte-idx)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) set_pte(kmap_pte-idx, mk_pte(page, prot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) arch_flush_lazy_mmu_mode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) return (void *)vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) EXPORT_SYMBOL(kmap_atomic_high_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * This is the same as kmap_atomic() but can map memory that doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * have a struct page associated with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) void *kmap_atomic_pfn(unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) return kmap_atomic_prot_pfn(pfn, kmap_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
/*
 * Tear down a mapping installed by kmap_atomic_high_prot().  If the
 * address lies inside the per-CPU kmap fixmap window the pte is
 * cleared and flushed and the slot index popped; any other address is
 * assumed to be a lowmem page that never needed a temporary mapping
 * (sanity-checked only under CONFIG_DEBUG_HIGHMEM).
 */
void kunmap_atomic_high(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	/*
	 * Fixmap virtual addresses grow downwards as the index grows,
	 * so FIX_KMAP_END maps to the lowest and FIX_KMAP_BEGIN to the
	 * highest address of the kmap window.
	 */
	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		/* The address must match the slot we are about to release. */
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remap it. Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		/* Not a kmap address: must at least be a valid lowmem one. */
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif
}
EXPORT_SYMBOL(kunmap_atomic_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) void __init set_highmem_pages_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) struct zone *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) int nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * Explicitly reset zone->managed_pages because set_highmem_pages_init()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * is invoked before memblock_free_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) reset_all_zones_managed_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) for_each_zone(zone) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) unsigned long zone_start_pfn, zone_end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) if (!is_highmem(zone))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) zone_start_pfn = zone->zone_start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) zone_end_pfn = zone_start_pfn + zone->spanned_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) nid = zone_to_nid(zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) zone->name, nid, zone_start_pfn, zone_end_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) add_highpages_with_active_regions(nid, zone_start_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) zone_end_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) }