// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains common routines for dealing with the freeing of page
 * tables, along with common page table handling code.
 *
 *  Derived from arch/powerpc/mm/tlb_64.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>

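/*
 * An exec fault is identified by the trap value saved in the regs:
 * 0x400 is the instruction storage interrupt vector, so a trap of
 * 0x400 means the current fault was triggered by an instruction fetch.
 */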
static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on PTEs that look reasonably
 * "normal". We currently require a PTE to be present, we avoid
 * _PAGE_SPECIAL and cache-inhibited PTEs, and we only do this on
 * userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte)
{
	if (pte_present(pte) && !pte_special(pte)) {
		if (pte_ci(pte))
			return 0;
		if (pte_user(pte))
			return 1;
	}
	return 0;
}

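/*
 * Translate a PTE to its struct page when that makes sense: return NULL
 * for invalid PFNs and for reserved pages, which we never attempt to
 * maintain cache coherency on.
 */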
static struct page *maybe_pte_to_page(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return NULL;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return NULL;
	return page;
}

#ifdef CONFIG_PPC_BOOK3S

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, then we always
 * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 * support fall into the same category.
 */

static pte_t set_pte_filter_hash(pte_t pte)
{
	if (radix_enabled())
		return pte;

	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct page *pg = maybe_pte_to_page(pte);
		if (!pg)
			return pte;
		if (!test_bit(PG_arch_1, &pg->flags)) {
			flush_dcache_icache_page(pg);
			set_bit(PG_arch_1, &pg->flags);
		}
	}
	return pte;
}

#else /* CONFIG_PPC_BOOK3S */

static pte_t set_pte_filter_hash(pte_t pte) { return pte; }

#endif /* CONFIG_PPC_BOOK3S */

/* Embedded type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 */
static inline pte_t set_pte_filter(pte_t pte)
{
	struct page *pg;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return set_pte_filter_hash(pte);

	/* No exec permission in the first place, move on */
	if (!pte_exec(pte) || !pte_looks_normal(pte))
		return pte;

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return pte;

	/* If the page is clean, we move on */
	if (test_bit(PG_arch_1, &pg->flags))
		return pte;

	/* If it's an exec fault, we flush the cache and make it clean */
	if (is_exec_fault()) {
		flush_dcache_icache_page(pg);
		set_bit(PG_arch_1, &pg->flags);
		return pte;
	}

	/* Else, we filter out _PAGE_EXEC */
	return pte_exprotect(pte);
}

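/*
 * Used by ptep_set_access_flags() below: on an exec fault where _PAGE_EXEC
 * was previously filtered out by set_pte_filter(), clean the page (flush
 * dcache/icache) if needed and hand back a PTE with exec permission
 * restored.
 */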
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct page *pg;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return pte;

	/* So here, we only care about exec faults, as we use them
	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
	 * we just bail out
	 */
	if (dirty || pte_exec(pte) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
	 * an error we would have bailed out earlier in do_page_fault()
	 * but let's make sure of it
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif /* CONFIG_DEBUG_VM */

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		goto bail;

	/* If the page is already clean, we move on */
	if (test_bit(PG_arch_1, &pg->flags))
		goto bail;

	/* Clean the page and set PG_arch_1 */
	flush_dcache_icache_page(pg);
	set_bit(PG_arch_1, &pg->flags);

bail:
	return pte_mkexec(pte);
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called.
	 */
	pte = set_pte_filter(pte);

	/* Perform the setting of the PTE */
	__set_pte_at(mm, addr, ptep, pte, 0);
}

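/*
 * Tear down the kernel mapping of a single page: clear the PTE in the
 * kernel page tables and flush the corresponding TLB range.
 */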
void unmap_kernel_page(unsigned long va)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep = pte_offset_kernel(pmdp, va);

	pte_clear(&init_mm, va, ptep);
	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e., a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed;
	entry = set_access_flags_filter(entry, vma, dirty);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
		assert_pte_locked(vma->vm_mm, address);
		__ptep_set_access_flags(vma, ptep, entry,
					address, mmu_virtual_psize);
	}
	return changed;
}

#ifdef CONFIG_HUGETLB_PAGE
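/*
 * Hugetlb flavour of ptep_set_access_flags(). The same filtering applies,
 * but the page size passed to __ptep_set_access_flags() is taken from the
 * hstate on book3s64 (see the in-line comments for the other cases).
 */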
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry. Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	int changed, psize;

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*(ptep), pte);
	if (changed) {

#ifdef CONFIG_PPC_BOOK3S_64
		struct hstate *h = hstate_vma(vma);

		psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif

#else
		/*
		 * Not used on non-book3s64 platforms.
		 * 8xx compares it with mmu_virtual_psize to
		 * know if it is a huge page or not.
		 */
		psize = MMU_PAGE_COUNT;
#endif
		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
	}
	return changed;
#endif
}

#if defined(CONFIG_PPC_8xx)
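/*
 * On 8xx a huge page is backed by several consecutive PTE cells in the
 * page table: replicate the (filtered) PTE value into each cell,
 * advancing the physical address by 4k each time.
 */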
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	pmd_t *pmd = pmd_off(mm, addr);
	pte_basic_t val;
	pte_basic_t *entry = &ptep->pte;
	int num, i;

	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	pte = set_pte_filter(pte);

	val = pte_val(pte);

	num = number_of_cells_per_pte(pmd, val, 1);

	for (i = 0; i < num; i++, entry++, val += SZ_4K)
		*entry = val;
}
#endif
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_DEBUG_VM
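/*
 * Debug helper: walk the page tables for @addr and assert that the PTE
 * page lock is held. Kernel mappings (init_mm) are skipped, as is a none
 * pmd (see the khugepaged comment below).
 */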
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, addr);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * When khugepaged collapses normal pages into a hugepage, it first
	 * sets the pmd to none to force page fault/gup to take mmap_lock.
	 * After the pmd is set to none, we do a pte_clear which ends up in
	 * this assertion, so if we find the pmd none, just return.
	 */
	if (pmd_none(*pmd))
		return;
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}
#endif /* CONFIG_DEBUG_VM */

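/*
 * Convert a vmalloc (or other page-mapped) kernel virtual address to a
 * physical address, preserving the offset within the page.
 */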
unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn = vmalloc_to_pfn(va);

	BUG_ON(!pfn);
	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it. This function needs to be called with interrupts disabled. We use
 * this variant when we have MSR[EE] = 0 but paca->irq_soft_mask = IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			bool *is_thp, unsigned *hpage_shift)
{
	pgd_t *pgdp;
	p4d_t p4d, *p4dp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift;

	if (hpage_shift)
		*hpage_shift = 0;

	if (is_thp)
		*is_thp = false;

	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or a page unmap. The returned pte_t * is still not
	 * stable, so it should be checked by the caller for the above
	 * conditions. The top level is an exception because it is folded
	 * into the p4d.
	 */
	pgdp = pgdir + pgd_index(ea);
	p4dp = p4d_offset(pgdp, ea);
	p4d = READ_ONCE(*p4dp);
	pdshift = P4D_SHIFT;

	if (p4d_none(p4d))
		return NULL;

	if (p4d_is_leaf(p4d)) {
		ret_pte = (pte_t *)p4dp;
		goto out;
	}

	if (is_hugepd(__hugepd(p4d_val(p4d)))) {
		hpdp = (hugepd_t *)&p4d;
		goto out_huge;
	}

	/*
	 * Even if we end up with an unmap, the pgtable will not
	 * be freed, because we do an RCU free and here we have
	 * interrupts disabled.
	 */
	pdshift = PUD_SHIFT;
	pudp = pud_offset(&p4d, ea);
	pud = READ_ONCE(*pudp);

	if (pud_none(pud))
		return NULL;

	if (pud_is_leaf(pud)) {
		ret_pte = (pte_t *)pudp;
		goto out;
	}

	if (is_hugepd(__hugepd(pud_val(pud)))) {
		hpdp = (hugepd_t *)&pud;
		goto out_huge;
	}

	pdshift = PMD_SHIFT;
	pmdp = pmd_offset(&pud, ea);
	pmd = READ_ONCE(*pmdp);

	/*
	 * A hugepage collapse is captured by this condition, see
	 * pmdp_collapse_flush.
	 */
	if (pmd_none(pmd))
		return NULL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * A hugepage split is captured by this condition, see
	 * pmdp_invalidate.
	 *
	 * Huge page modification can be caught here too.
	 */
	if (pmd_is_serializing(pmd))
		return NULL;
#endif

	if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
		if (is_thp)
			*is_thp = true;
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	if (pmd_is_leaf(pmd)) {
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	if (is_hugepd(__hugepd(pmd_val(pmd)))) {
		hpdp = (hugepd_t *)&pmd;
		goto out_huge;
	}

	return pte_offset_kernel(&pmd, ea);

out_huge:
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (hpage_shift)
		*hpage_shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);