// SPDX-License-Identifier: GPL-2.0
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/dma-map-ops.h>
#include <linux/dmar.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <linux/swiotlb.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

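/*
 * Upper bound, as a kernel identity-mapped virtual address, of memory
 * treated as DMA-able: the first 4 GB of physical memory mapped at
 * PAGE_OFFSET.
 */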
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

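/*
 * Lazily make the i-cache coherent with the d-cache for a page that is
 * about to become executable.  PG_arch_1 acts as a "page is clean"
 * marker so the flush is done at most once per page.
 */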
void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + page_size(page));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
	unsigned long pfn = PHYS_PFN(paddr);

	do {
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
	} while (++pfn <= PHYS_PFN(paddr + size - 1));
}

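/*
 * Place the bottom of the register backing store (which grows upward)
 * RLIMIT_STACK bytes below mm->start_stack, capping the distance at
 * MAX_USER_STACK_SIZE and rounding to a page boundary.
 */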
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = vm_area_alloc(current->mm);
	if (vma) {
		vma_set_anonymous(vma);
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		mmap_write_lock(current->mm);
		if (insert_vm_struct(current->mm, vma)) {
			mmap_write_unlock(current->mm);
			vm_area_free(vma);
			return;
		}
		mmap_write_unlock(current->mm);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = vm_area_alloc(current->mm);
		if (vma) {
			vma_set_anonymous(vma);
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			mmap_write_lock(current->mm);
			if (insert_vm_struct(current->mm, vma)) {
				mmap_write_unlock(current->mm);
				vm_area_free(vma);
				return;
			}
			mmap_write_unlock(current->mm);
		}
	}
}

void
free_initmem (void)
{
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */

	{
		p4d = p4d_alloc(&init_mm, pgd, address);
		if (!p4d)
			goto out;
		pud = pud_alloc(&init_mm, p4d, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

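/*
 * Install the gate page(s) at GATE_ADDR in the kernel page table and
 * apply the run-time patches via ia64_patch_gate().  The gate page is
 * what user space enters for signal trampolines and fast system calls.
 */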
static void __init
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

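/*
 * A synthetic VMA describing the user-visible gate area; it is handed
 * out by get_gate_vma() so that generic code (e.g. ptrace access and
 * core dumps) treats the gate page like an ordinary read/execute
 * mapping.
 */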
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	vma_init(&gate_vma, NULL);
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;

	return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

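	/*
	 * unimpl_va_mask has a bit set for every unimplemented virtual-
	 * address bit.  Masking in the region bits (61-63) and applying
	 * ffz() to the complement yields the index of the lowest
	 * unimplemented bit, i.e. the number of implemented VA bits per
	 * region (IMPL_VA_MSB + 1).
	 */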
	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots.  Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");


	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
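/*
 * Starting at offset i (in pages) into node 'node's slice of the virtual
 * mem_map, walk the kernel page tables and return the offset of the next
 * pfn whose struct page is actually backed by memory, skipping unmapped
 * spans one pgd/p4d/pud/pmd/pte entry at a time.  The result is relative
 * to the node's node_start_pfn.
 */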
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);
	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

	do {
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		p4d = p4d_offset(pgd, end_address);
		if (p4d_none(*p4d)) {
			end_address += P4D_SIZE;
			continue;
		}

		pud = pud_offset(p4d, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

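/*
 * Make sure the portion of the virtual mem_map that describes the
 * physical range [start, end) is backed by real pages: allocate any
 * missing p4d/pud/pmd/pte levels and the backing pages themselves from
 * memblock on the node that owns the range.
 */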
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd)) {
			p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!p4d)
				goto err_alloc;
			pgd_populate(&init_mm, pgd, p4d);
		}
		p4d = p4d_offset(pgd, address);

		if (p4d_none(*p4d)) {
			pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pud)
				goto err_alloc;
			p4d_populate(&init_mm, p4d, pud);
		}
		pud = pud_offset(p4d, address);

		if (pud_none(*pud)) {
			pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pmd)
				goto err_alloc;
			pud_populate(&init_mm, pud, pmd);
		}
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd)) {
			pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pte)
				goto err_alloc;
			pmd_populate_kernel(&init_mm, pmd, pte);
		}
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte)) {
			void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
							 node);
			if (!page)
				goto err_alloc;
			set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
					     PAGE_KERNEL));
		}
	}
	return 0;

err_alloc:
	panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
	      __func__, PAGE_SIZE, PAGE_SIZE, node);
	return -ENOMEM;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

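/*
 * Callback for one memory range: clamp the corresponding slice of the
 * virtual mem_map to args->start/args->end, widen it to cover whole
 * backing pages, and initialize those struct pages with
 * memmap_init_zone().
 */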
static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
				 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
	return 0;
}

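/*
 * Without a virtual mem_map, the generic zone initialization covers the
 * whole pfn range directly.  With vmem_map, walk the EFI memory map so
 * that only struct pages that are actually backed by memory are touched.
 */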
void __meminit
arch_memmap_init (unsigned long size, int nid, unsigned long zone,
		  unsigned long start_pfn)
{
	if (!vmem_map) {
		memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
				 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
	} else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

void __init memmap_init(void)
{
}

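/*
 * A pfn is valid iff its struct page can actually be read: probe the
 * first byte with __get_user() and, when the entry straddles a page
 * boundary, the last byte as well.  This catches holes in the virtual
 * mem_map.
 */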
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

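/*
 * efi_memmap_walk() callback: record, through *arg, the largest gap
 * between consecutive memory descriptors.
 */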
int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

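/*
 * Register one usable physical memory range with memblock, clipping it
 * against the crash kernel reservation when CONFIG_KEXEC is enabled.
 */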
int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

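/*
 * efi_memmap_walk() callback that widens the global min_low_pfn /
 * max_low_pfn bounds.  Ranges are rounded to page granularity on
 * FLATMEM and to granule granularity otherwise.
 */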
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

	/*
	 * This needs to be called _after_ the command line has been parsed but
	 * _before_ any drivers that may need the PCI DMA interface are
	 * initialized or bootmem has been freed.
	 */
#ifdef CONFIG_INTEL_IOMMU
	detect_intel_iommu();
	if (!iommu_detected)
#endif
#ifdef CONFIG_SWIOTLB
		swiotlb_init(1);
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);
	memblock_free_all();
	mem_init_print_info(NULL);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
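/*
 * Memory hotplug entry points: hand the added/removed pfn range to the
 * generic code.  Only the default PAGE_KERNEL protection is accepted
 * when adding memory.
 */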
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif