// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

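/*
 * physical_mask can be narrowed early in boot by features that repurpose
 * high physical-address bits (for example Intel MKTME stealing bits for
 * keyIDs), which is why it is a variable rather than a constant here.
 */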
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
#endif

#ifdef CONFIG_HIGHPTE
#define PGTABLE_HIGHMEM __GFP_HIGHMEM
#else
#define PGTABLE_HIGHMEM 0
#endif

#ifndef CONFIG_PARAVIRT
static inline
void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	tlb_remove_page(tlb, table);
}
#endif
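/*
 * With CONFIG_PARAVIRT the function above is a pv_ops hook instead; it
 * defaults to the same tlb_remove_page(), but hypervisor guests such as
 * Xen PV redirect it to tlb_remove_table() so a page-table page is not
 * reused while another CPU may still be walking it.
 */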

gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;

pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return __pte_alloc_one(mm, __userpte_alloc_gfp);
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);
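/*
 * Booting with "userpte=nohigh" thus keeps user PTE pages in lowmem.
 * This only matters on 32-bit CONFIG_HIGHPTE kernels; otherwise
 * PGTABLE_HIGHMEM is 0 and there is nothing to clear.
 */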

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_pte_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	paravirt_tlb_remove_table(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	paravirt_tlb_remove_table(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	paravirt_tlb_remove_table(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
	paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
	paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

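/*
 * Number of pgd entries that are private to each process; the remaining
 * kernel entries are shared with swapper_pg_dir. The MAX_ variant is a
 * compile-time upper bound used to size on-stack arrays in pgd_alloc().
 */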
#define UNSHARED_PTRS_PER_PGD \
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
#define MAX_UNSHARED_PTRS_PER_PGD \
	max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)

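/*
 * A pgd page remembers its owning mm in the backing struct page, so
 * code that walks pgd_list (e.g. sync_global_pgds() taking each mm's
 * page_table_lock) can get from the pgd back to the mm.
 */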
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	virt_to_page(pgd)->pt_mm = mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return page->pt_mm;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS >= 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
#define MAX_PREALLOCATED_PMDS	MAX_UNSHARED_PTRS_PER_PGD

/*
 * We allocate separate PMDs for the kernel part of the user page-table
 * when PTI is enabled. We need them to map the per-process LDT into the
 * user-space page-table.
 */
#define PREALLOCATED_USER_PMDS	(boot_cpu_has(X86_FEATURE_PTI) ? \
					KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
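/*
 * MAX_PREALLOCATED_USER_PMDS stays KERNEL_PGD_PTRS even when PTI is
 * disabled at runtime: it must be a compile-time constant because it
 * sizes the on-stack u_pmds[] array in pgd_alloc().
 */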

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0
#define MAX_PREALLOCATED_PMDS	0
#define PREALLOCATED_USER_PMDS	0
#define MAX_PREALLOCATED_USER_PMDS 0
#endif	/* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
	int i;
	bool failed = false;
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;

	for (i = 0; i < count; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds, count);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = *pgdp;

	if (pgd_val(pgd) != 0) {
		pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

		pgd_clear(pgdp);

		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
	}
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		mop_up_one_pmd(mm, &pgdp[i]);

#ifdef CONFIG_PAGE_TABLE_ISOLATION

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pgdp = kernel_to_user_pgdp(pgdp);

	for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
		mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
#endif
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	p4d_t *p4d;
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
				     pgd_t *k_pgd, pmd_t *pmds[])
{
	pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	p4d_t *u_p4d;
	pud_t *u_pud;
	int i;

	u_p4d = p4d_offset(u_pgd, 0);
	u_pud = pud_offset(u_p4d, 0);

	s_pgd += KERNEL_PGD_BOUNDARY;
	u_pud += KERNEL_PGD_BOUNDARY;

	for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
		pmd_t *pmd = pmds[i];

		memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
		       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, u_pud, pmd);
	}
}
#else
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
				     pgd_t *k_pgd, pmd_t *pmds[])
{
}
#endif
/*
 * Xen paravirt assumes the pgd table occupies one full page, and the
 * 64-bit kernel makes the same assumption.
 *
 * A kernel with PAE paging that is not running as a Xen domain,
 * however, only needs 32 bytes for the pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32
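/*
 * A PAE pgd (the PDPT) is just 4 entries of 8 bytes each. The 32-byte
 * alignment is architectural: in PAE mode CR3 holds only bits 31:5 of
 * the PDPT base address.
 */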

static struct kmem_cache *pgd_cache;

void __init pgtable_cache_init(void)
{
	/*
	 * When a PAE kernel is running as a Xen domain, it does not use
	 * a shared kernel pmd, and that requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return;

	/*
	 * When a PAE kernel is not running as a Xen domain, it uses a
	 * shared kernel pmd, which does not require a whole page for the
	 * pgd: 32 bytes suffice. So at boot we create a 32-byte slab for
	 * pgd table allocation.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
}

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * If there is no SHARED_KERNEL_PMD, the PAE kernel is running as
	 * a Xen domain and we allocate one page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
						 PGD_ALLOCATION_ORDER);

	/*
	 * Otherwise the PAE kernel is not running as a Xen domain and we
	 * can allocate the pgd from the 32-byte slab to save memory.
	 */
	return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
					 PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
	pmd_t *pmds[MAX_PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
		goto out_free_pgd;

	if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
		goto out_free_pmds;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_user_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);
	pgd_prepopulate_user_pmd(mm, pgd, u_pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_user_pmds:
	free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
	free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty)
		set_pte(ptep, entry);

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		set_pmd(pmdp, entry);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed = !pud_same(*pudp, entry);

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);

	if (changed && dirty) {
		set_pud(pudp, entry);
		/*
		 * We had a write-protection fault here and changed the pud
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

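/*
 * The locked test_and_clear_bit() below is not optional: the CPU can be
 * setting the dirty bit in the same PTE concurrently, and a plain
 * read-modify-write of the entry could lose that update.
 */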
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}
int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pud_t *pudp)
{
	int ret = 0;

	if (pud_young(*pudp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pudp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}
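/*
 * reserve_top_address() must run before any fixmap entry is set up
 * (hence the BUG_ON above); historically 32-bit hypervisor guests such
 * as lguest used it to carve out space below the fixmap.
 */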

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

#ifdef CONFIG_X86_64
	/*
	 * Ensure that the static initial page tables are covering the
	 * fixmap completely.
	 */
	BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
		     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
		       phys_addr_t phys, pgprot_t flags)
{
	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __default_kernel_pte_mask;

	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
#endif

/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK))
		return 0;

	/* Bail out if we are on a populated non-leaf entry: */
	if (pud_present(*pud) && !pud_huge(*pud))
		return 0;

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

	return 1;
}

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK)) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_huge(*pmd))
		return 0;

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

	return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

/*
 * Until we support 512GB pages, skip them in the vmap area.
 */
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;
}

#ifdef CONFIG_X86_64
/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 * @addr: Virtual address associated with pud.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE: Callers must allow a single page allocation.
 */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd, *pmd_sv;
	pte_t *pte;
	int i;

	pmd = (pmd_t *)pud_page_vaddr(*pud);
	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
	if (!pmd_sv)
		return 0;

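	/*
	 * Snapshot the pmd entries into pmd_sv first: the pte pages they
	 * reference (and the pmd page itself) may only be freed after the
	 * paging-structure caches are flushed below.
	 */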
	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_sv[i] = pmd[i];
		if (!pmd_none(pmd[i]))
			pmd_clear(&pmd[i]);
	}

	pud_clear(pud);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd_sv[i])) {
			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
			free_page((unsigned long)pte);
		}
	}

	free_page((unsigned long)pmd_sv);

	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);

	return 1;
}

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 * @addr: Virtual address associated with pmd.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	free_page((unsigned long)pte);

	return 1;
}

#else /* !CONFIG_X86_64 */

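/*
 * On 32-bit the pmd level is either folded into the pgd (non-PAE) or
 * must never be freed behind the mapping's back (PAE, see below), so
 * only report success when there is nothing to free.
 */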
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	return pud_none(*pud);
}

/*
 * Disable free page handling on x86-PAE. This assures that ioremap()
 * does not update sync'd pmd entries. See vmalloc_sync_one().
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return pmd_none(*pmd);
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */