/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

bool hugetlb_disabled = false;

#define hugepd_none(hpd)	(hpd_val(hpd) == 0)

#define PTE_T_ORDER	(__builtin_ffs(sizeof(pte_basic_t)) - \
			 __builtin_ffs(sizeof(void *)))
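
/*
 * PTE_T_ORDER works out to log2(sizeof(pte_basic_t) / sizeof(void *)).
 * Passed to PGT_CACHE(), it selects a page-table cache whose objects are
 * just big enough for a single huge PTE; this is what the e500/8xx style
 * layouts use, where one huge page is described by a lone PTE that several
 * directory entries point at.
 */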

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	/*
	 * Only called for hugetlbfs pages, hence can ignore THP and the
	 * irq disabled walk.
	 */
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int pdshift,
			   unsigned int pshift, spinlock_t *ptl)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

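	/*
	 * Two layouts are possible: if the huge page is at least as big as
	 * the range covered by one directory entry (pshift >= pdshift, the
	 * e500/8xx case), the "table" holds a single huge PTE and
	 * 2^(pshift - pdshift) consecutive directory entries all point at
	 * it.  Otherwise the hugepd is a cut-down page table with
	 * 2^(pdshift - pshift) entries.
	 */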
	if (pshift >= pdshift) {
		cachep = PGT_CACHE(PTE_T_ORDER);
		num_hugepd = 1 << (pshift - pdshift);
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}

	if (!cachep) {
		WARN_ONCE(1, "No page table cache created for hugetlb tables");
		return -ENOMEM;
	}

	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(ptl);
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location. Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		hugepd_populate(hpdp, new, pshift);
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1 ; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
	} else {
		kmemleak_ignore(new);
	}
	spin_unlock(ptl);
	return 0;
}

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
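/*
 * Placement summary for the cases below: a size whose shift matches a
 * directory-level shift is a leaf at that level (the "16GB huge page" at the
 * P4D, the "16MB hugepage" at the PMD), a size that falls between two levels
 * goes through a hugepd table set up by __hugepte_alloc(), and on 8xx pages
 * smaller than PMD_SIZE live in an ordinary PTE page.
 */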
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	p4d_t *p4;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;
	spinlock_t *ptl;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);
	p4 = p4d_offset(pg, addr);

#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) p4;
	else if (pshift > PUD_SHIFT) {
		/*
		 * We need to use a hugepd table
		 */
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)p4;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, p4, addr);
		if (!pu)
			return NULL;
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else {
				ptl = pmd_lockptr(mm, pm);
				hpdp = (hugepd_t *)pm;
			}
		}
	}
#else
	if (pshift >= PGDIR_SHIFT) {
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)p4;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, p4, addr);
		if (!pu)
			return NULL;
		if (pshift >= PUD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			ptl = pmd_lockptr(mm, pm);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif
	if (!hpdp)
		return NULL;

	if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT)
		return pte_alloc_map(mm, (pmd_t *)hpdp, addr);

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
						  pdshift, pshift, ptl))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;

/*
 * Build list of addresses of gigantic pages. This function is used in early
 * boot before the buddy allocator is setup.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif


int __init alloc_bootmem_huge_page(struct hstate *h)
{

#ifdef CONFIG_PPC_BOOK3S_64
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return pseries_alloc_bootmem_huge_page(h);
#endif
	return __alloc_bootmem_huge_page(h);
}

#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

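/*
 * Hugepd tables freed here may still be visible to concurrent lockless
 * page-table walkers.  hugepd_free() therefore collects them in a per-cpu,
 * page-sized batch and hands the batch to RCU, so the memory only goes back
 * to the kmem cache after a grace period.  If the mm is single threaded (or
 * known to be local to this CPU), the table can be freed immediately.
 */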
struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

	free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    mm_is_thread_local(tlb->mm)) {
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

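/*
 * Clear the directory entries covering a hugepd and free the hugepd table
 * itself.  floor/ceiling have the same meaning as in free_pgd_range(): the
 * directory span is only torn down when it lies entirely inside the region
 * we are allowed to free, otherwise a neighbouring mapping may still need it.
 */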
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		*hpdp = __hugepd(0);

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(pdshift - shift));
}

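/*
 * 8xx only: huge pages smaller than PMD_SIZE sit in a normal PTE page (see
 * the pte_alloc_map() call in huge_pte_alloc()), so tearing them down means
 * freeing that PTE page rather than a hugepd table.
 */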
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	unsigned long start = addr;
	pgtable_t token = pmd_pgtable(*pmd);

	start &= PMD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			if (pmd_none_or_clear_bad(pmd))
				continue;

			/*
			 * If it is not a hugepd pointer, we should have
			 * found it cleared already.
			 */
			WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx));

			hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling);

			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(p4d, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above). Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers. That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		p4d = p4d_offset(pgd, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (p4d_none_or_clear_bad(p4d))
				continue;
			hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

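/*
 * follow_page() support for hugepd-mapped pages: look up the huge PTE under
 * mm->page_table_lock, return the subpage of the huge page that 'address'
 * falls in (taking a reference if FOLL_GET is set), and wait and retry if
 * the entry is a migration entry.
 */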
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
{
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page = NULL;
	unsigned long mask;
	int shift = hugepd_shift(hpd);
	struct mm_struct *mm = vma->vm_mm;

retry:
	/*
	 * hugepage directory entries are protected by mm->page_table_lock
	 * Use this instead of huge_pte_lockptr
	 */
	ptl = &mm->page_table_lock;
	spin_lock(ptl);

	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		mask = (1UL << shift) - 1;
		page = pte_page(*ptep);
		page += ((address & mask) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(*ptep)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			goto retry;
		}
	}
	spin_unlock(ptl);
	return page;
}

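/*
 * With the hash MMU the address space is carved into slices that each carry
 * a page size, so a hugetlb mapping must land in a slice of the matching
 * size; radix has no such restriction and uses its own unmapped-area search.
 */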
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
							pgoff, flags);
#endif
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	/* With radix we don't use slices, so derive it from the vma */
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

		return 1UL << mmu_psize_to_shift(psize);
	}
	return vma_kernel_pagesize(vma);
}

bool __init arch_hugetlb_valid_size(unsigned long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/*
	 * Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits.
	 */
	if (size <= PAGE_SIZE || !is_power_of_2(size))
		return false;

	mmu_psize = check_and_get_huge_psize(shift);
	if (mmu_psize < 0)
		return false;

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	return true;
}

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);

	if (!arch_hugetlb_valid_size((unsigned long)size))
		return -EINVAL;

	hugetlb_add_hstate(shift - PAGE_SHIFT);
	return 0;
}

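/*
 * Walk mmu_psize_defs[] and register every page size the current MMU
 * supports as a hugetlb size.  Sizes that need a hugepd (pdshift > shift)
 * get a matching page-table kmem cache; the e500/8xx layouts instead use a
 * cache of order PTE_T_ORDER, big enough for a lone huge PTE.
 */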
static int __init hugetlbpage_init(void)
{
	bool configured = false;
	int psize;

	if (hugetlb_disabled) {
		pr_info("HugeTLB support is disabled!\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
	    !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
		if (shift > PGDIR_SHIFT)
			continue;
		else if (shift > PUD_SHIFT)
			pdshift = PGDIR_SHIFT;
		else if (shift > PMD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PMD_SHIFT;
#else
		if (shift < PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PGDIR_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
#endif

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;
		/*
		 * If pdshift and shift are the same, we don't use the
		 * pgt cache for hugepd.
		 */
		if (pdshift > shift) {
			if (!IS_ENABLED(CONFIG_PPC_8xx))
				pgtable_cache_add(pdshift - shift);
		} else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
			   IS_ENABLED(CONFIG_PPC_8xx)) {
			pgtable_cache_add(PTE_T_ORDER);
		}

		configured = true;
	}

	if (configured) {
		if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
			hugetlbpage_init_default();
	} else
		pr_info("Failed to initialize. Disabling HugeTLB\n");

	return 0;
}

arch_initcall(hugetlbpage_init);

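/*
 * Flush the data and instruction caches for every subpage of a compound
 * huge page; highmem subpages are temporarily mapped with kmap_atomic()
 * first.
 */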
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < compound_nr(page); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}

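/*
 * Reserve CMA for gigantic hugetlb pages at boot: 1G (PUD-sized) pages when
 * the radix MMU is in use, or the 16G hash page size on bare metal.  Such
 * orders are beyond what the buddy allocator can provide at run time, hence
 * the dedicated CMA area.
 */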
void __init gigantic_hugetlb_cma_reserve(void)
{
	unsigned long order = 0;

	if (radix_enabled())
		order = PUD_SHIFT - PAGE_SHIFT;
	else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift)
		/*
		 * On pseries, 16G pages are reserved via ibm,expected#pages
		 * instead, so only do this on bare metal.
		 */
		order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;

	if (order) {
		VM_WARN_ON(order < MAX_ORDER);
		hugetlb_cma_reserve(order);
	}
}