// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  These are
 * rarely called, and then usually from the p?d_none_or_clear_bad
 * macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}
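
/*
 * For context, a sketch mirroring the caller side: the
 * p?d_none_or_clear_bad() helpers in <linux/pgtable.h> invoke the
 * functions above roughly like this, so a corrupted entry is reported
 * once and then treated as empty by the page-table walker:
 *
 *	static inline int pgd_none_or_clear_bad(pgd_t *pgd)
 *	{
 *		if (pgd_none(*pgd))
 *			return 1;
 *		if (unlikely(pgd_bad(*pgd))) {
 *			pgd_clear_bad(pgd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */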

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out the way the
 * p4d/pud variants above are: pmd folding is special, and the pmd_*
 * macros typically refer to the upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission.  Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this.  We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
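
/*
 * Usage sketch (simplified from the fault-handling paths in
 * mm/memory.c):
 *
 *	entry = pte_mkyoung(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, 0))
 *		update_mmu_cache(vma, address, ptep);
 *
 * i.e. the return value tells the caller whether update_mmu_cache()
 * is needed; if the PTE did not change, the fault was spurious and
 * nothing further is required.
 */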

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
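
/*
 * Note on the generic version above: if the old pte was not accessible
 * (e.g. pte_none() or a swap entry), no CPU can hold a TLB entry for
 * it, so the flush is safely skipped.
 */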
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some arches */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
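
/*
 * Deposit/withdraw pairing, as a simplified sketch of the THP code
 * paths: a page table is deposited when a huge pmd is installed, and
 * withdrawn again when the huge pmd is split or zapped, always under
 * the pmd lock:
 *
 *	ptl = pmd_lock(mm, pmdp);
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 *	spin_unlock(ptl);
 */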

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif
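
/*
 * Illustration: the huge-pmd split path relies on pmdp_invalidate()
 * roughly like this:
 *
 *	old_pmd = pmdp_invalidate(vma, haddr, pmdp);
 *	... rebuild the ptes from old_pmd ...
 *	pmd_populate(mm, pmdp, pgtable);
 *
 * Marking the entry invalid (rather than clearing it) keeps hardware
 * from walking through a half-rewritten huge mapping in the meantime,
 * while still letting the caller recover the old dirty/young bits.
 */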

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * reuse pmdp_huge_get_and_clear() here.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
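
/*
 * Example caller (sketch of khugepaged's collapse path): the pmd that
 * points at the old page table is cleared with pmdp_collapse_flush()
 * before the small pages are copied into the new huge page:
 *
 *	pmd = pmdp_collapse_flush(vma, address, pmdp);
 *	... copy pages into the huge page, then install the huge pmd ...
 *
 * Flushing at pte granularity here is what prevents other CPUs from
 * using stale small-page translations while the collapse is in flight.
 */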
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */