Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards. The listing below is arch/arm64/include/asm/pgtable.h from this tree.

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
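/*
 * Note (illustrative): VMALLOC_END is written as a negative offset.
 * Interpreted as an unsigned virtual address it wraps to just below the
 * top of the kernel VA space, leaving room above the vmalloc area for
 * the vmemmap, PCI I/O space and fixed mappings named in the comment
 * above.
 */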

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << 36);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
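/*
 * Worked example (illustrative): with 52-bit PAs (which on this kernel
 * implies 64K pages), the pte's address field [47:16] cannot hold PA
 * bits [51:48], so those live in pte bits [15:12]. The "<< 36" above
 * moves pte bit 12 up to PA bit 48 (and bit 15 to bit 51);
 * __phys_to_pte_val() applies the inverse ">> 36" and relies on
 * PTE_ADDR_MASK to discard the bits that belong to neither field.
 */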

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
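/*
 * The "- 1" on both sides of the comparison above keeps the clamp
 * correct when __boundary wraps to 0 at the very top of the address
 * space: 0 - 1 is ULONG_MAX, which is never below (end) - 1, so (end)
 * is returned instead of the wrapped boundary.
 */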

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
 * set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
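/*
 * Worked example (illustrative): take a writable, clean pte (PTE_WRITE
 * set, PTE_RDONLY set, PTE_DIRTY clear). With DBM, the hardware marks
 * it dirty on the first write by clearing PTE_RDONLY. A later
 * pte_wrprotect() must not lose that information, which is why it
 * first folds the hardware state into the software bit:
 *
 *	if (pte_hw_dirty(pte))		// PTE_WRITE && !PTE_RDONLY
 *		pte = pte_mkdirty(pte);	// sets PTE_DIRTY
 *	...				// then clears PTE_WRITE, sets PTE_RDONLY
 *
 * so pte_dirty() still reports the page as dirty afterwards.
 */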

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	if (system_supports_mte() &&
	    pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
		mte_sync_tags(ptep, pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}
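/*
 * Note the ordering in set_pte_at() above: the I-cache/D-cache sync for
 * user-executable pages and the MTE tag sync both happen before the pte
 * is published by set_pte(), so no thread can observe the new mapping
 * while its instructions or tags are still stale.
 */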

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present_invalid(pmd)     (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
#define set_pud_at(mm, addr, pudp, pud)	set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud))

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
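/*
 * Usage example (illustrative, not from this file): a driver exposing
 * write-combined memory through mmap() would typically adjust the vma's
 * protection with one of the helpers above before mapping the pages:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */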
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

/*
 * Mark the prot value as outer cacheable and inner non-cacheable. Non-coherent
 * devices on a system with support for a system or last level cache use these
 * attributes to cache allocations in the system cache.
 */
#define pgprot_syscached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_iNC_oWB) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		pmd_sect(pmd)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

#ifdef CONFIG_MEMORY_HOTPLUG
extern int populate_range_driver_managed(u64 start, u64 size,
		const char *resource_name);
extern int depopulate_range_driver_managed(u64 start, u64 size,
		const char *resource_name);
#endif

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}
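/*
 * Why the in_swapper_pgdir() special cases below: the kernel's top-level
 * table (swapper_pg_dir) is normally mapped read-only, so the set_p?d()
 * helpers cannot write to it directly and instead go through
 * set_swapper_pgd(), which temporarily maps the page writable via the
 * fixmap.
 */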

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		pud_sect(pud)
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_page_paddr(p4d));
}

/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) #define pud_set_fixmap(addr)		NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) #define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) #define pud_clear_fixmap()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) #define pud_offset_kimg(dir,addr)	((pud_t *)dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) #define pgd_ERROR(e)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) #define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) #define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	 * Normal and Normal-Tagged are two different memory types and indices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			      PTE_ATTRINDX_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	/* preserve the hardware dirty information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	if (pte_hw_dirty(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		pte = pte_mkdirty(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) }
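
/*
 * Editorial sketch (hypothetical helper, not part of the original
 * header): a minimal use of pte_modify(). Because PTE_ATTRINDX_MASK is
 * part of 'mask', the MAIR memory type comes from the new protection,
 * so Normal and Normal-Tagged mappings cannot be silently mixed up.
 */
static inline pte_t __pte_modify_example(pte_t pte)
{
	/* Rebuild the entry with read-only user permissions. */
	return pte_modify(pte, PAGE_READONLY);
}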
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) extern int ptep_set_access_flags(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 				 unsigned long address, pte_t *ptep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 				 pte_t entry, int dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 					unsigned long address, pmd_t *pmdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 					pmd_t entry, int dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) static inline int pud_devmap(pud_t pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) static inline int pgd_devmap(pgd_t pgd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799)  * Atomic pte/pmd modifications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) static inline int __ptep_test_and_clear_young(pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	pte_t old_pte, pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	pte = READ_ONCE(*ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		old_pte = pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		pte = pte_mkold(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 					       pte_val(old_pte), pte_val(pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	} while (pte_val(pte) != pte_val(old_pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	return pte_young(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 					    unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 					    pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	return __ptep_test_and_clear_young(ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) }
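
/*
 * Editorial sketch (hypothetical helper, not part of the original
 * header): how an aging scan might consume the primitive above. The
 * cmpxchg loop in __ptep_test_and_clear_young() retries until it
 * replaces exactly the value it last read, so a racing hardware update
 * of the access flag is never lost.
 */
static inline bool __page_referenced_example(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep)
{
	/* True if the page was touched since the previous scan. */
	return ptep_test_and_clear_young(vma, addr, ptep) != 0;
}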
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 					 unsigned long address, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	int young = ptep_test_and_clear_young(vma, address, ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (young) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		 * We can elide the trailing DSB here since the worst that can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		 * happen is that a CPU continues to use the young entry in its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		 * TLB and we mistakenly reclaim the associated page. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		 * window for such an event is bounded by the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		 * context-switch, which provides a DSB to complete the TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		 * invalidation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		flush_tlb_page_nosync(vma, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	return young;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 					    unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 					    pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 				       unsigned long address, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) }
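
/*
 * Editorial note (not part of the original header): the xchg above
 * atomically snapshots and clears the entry, so a dirty bit set by the
 * hardware DBM mechanism just before the clear is still visible in the
 * returned value; a teardown path can test it, for example:
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);
 *	if (pte_dirty(old))
 *		set_page_dirty(pte_page(old));
 */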
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 					    unsigned long address, pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  * ptep_set_wrprotect - mark read-only while transferring potential hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) #define __HAVE_ARCH_PTEP_SET_WRPROTECT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	pte_t old_pte, pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	pte = READ_ONCE(*ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		old_pte = pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		pte = pte_wrprotect(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 					       pte_val(old_pte), pte_val(pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	} while (pte_val(pte) != pte_val(old_pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) }
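
/*
 * Editorial note (not part of the original header): on entry a
 * hardware-dirty pte has PTE_DBM (aliased to PTE_WRITE) set and
 * PTE_RDONLY clear; pte_wrprotect() folds that state into the software
 * PTE_DIRTY bit and sets PTE_RDONLY, so pte_dirty() still reports the
 * page as dirty after the permission change.
 */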
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) #define __HAVE_ARCH_PMDP_SET_WRPROTECT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) static inline void pmdp_set_wrprotect(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 				      unsigned long address, pmd_t *pmdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) #define pmdp_establish pmdp_establish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  * Encode and decode a swap entry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  *	bits 0-1:	present (must be zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  *	bits 2-7:	swap type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  *	bits 8-57:	swap offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  *	bit  58:	PTE_PROT_NONE (must be zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) #define __SWP_TYPE_SHIFT	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) #define __SWP_TYPE_BITS		6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) #define __SWP_OFFSET_BITS	50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) #define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) #define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) #define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
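
/*
 * Editorial sketch (hypothetical helper, not part of the original
 * header): a round trip through the swap encoding above.
 */
static inline void __swp_encoding_example(void)
{
	/* Encode swap type 3, offset 0x1234 into an entry... */
	swp_entry_t entry = __swp_entry(3, 0x1234);

	/* ...and decode both fields back out unchanged. */
	WARN_ON(__swp_type(entry) != 3);
	WARN_ON(__swp_offset(entry) != 0x1234);
}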
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) #define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) #define __swp_entry_to_pmd(swp)		__pmd((swp).val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * Ensure that there are not more swap files than can be encoded in the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  * PTEs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
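
/*
 * Editorial note (not part of the original header): __SWP_TYPE_BITS == 6
 * permits 64 distinct swap types, while MAX_SWAPFILES_SHIFT is 5 in this
 * kernel, so the check above has comfortable headroom.
 */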
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) extern int kern_addr_valid(unsigned long addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) #ifdef CONFIG_ARM64_MTE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) #define __HAVE_ARCH_PREPARE_TO_SWAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) static inline int arch_prepare_to_swap(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (system_supports_mte())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		return mte_save_tags(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) #define __HAVE_ARCH_SWAP_INVALIDATE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	if (system_supports_mte())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		mte_invalidate_tags(type, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) static inline void arch_swap_invalidate_area(int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	if (system_supports_mte())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		mte_invalidate_tags_area(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) #define __HAVE_ARCH_SWAP_RESTORE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	if (system_supports_mte() && mte_restore_tags(entry, page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		set_bit(PG_mte_tagged, &page->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) }
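
/*
 * Editorial sketch (hypothetical helper, not part of the original
 * header): how the MTE hooks above pair up over a page's swap cycle.
 */
static inline int __mte_swap_cycle_example(struct page *page,
					   swp_entry_t entry)
{
	/* Swap-out: stash the page's allocation tags before writeback. */
	int ret = arch_prepare_to_swap(page);

	if (ret)
		return ret;

	/* Swap-in: restore the tags and re-mark the page as tagged. */
	arch_swap_restore(entry, page);
	return 0;
}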
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) #endif /* CONFIG_ARM64_MTE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  * On AArch64, cache coherency is handled via the set_pte_at() function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) static inline void update_mmu_cache(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 				    unsigned long addr, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	 * We don't do anything here, so there's a very small chance of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	 * us retaking a user fault which we just fixed up. The alternative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	 * is doing a dsb(ishst), but that penalises the fastpath.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) #ifdef CONFIG_ARM64_PA_BITS_52
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) #define phys_to_ttbr(addr)	(addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) #endif
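
/*
 * Editorial note (not part of the original header): with 52-bit PAs the
 * TTBR BADDR field carries PA[51:48] in TTBR[5:2], hence the OR with
 * (addr >> 46): for addr = 1UL << 48, (addr >> 46) == 1UL << 2, so PA
 * bit 48 lands in TTBR bit 2 before TTBR_BADDR_MASK_52 is applied.
 */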
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  * On arm64 without hardware Access Flag, copying from user will fail because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  * the pte is old and cannot be marked young. So we always end up with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * zeroed page after fork() + CoW for pfn mappings. We don't always have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  * hardware-managed access flag on arm64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) static inline bool arch_faults_on_old_pte(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	WARN_ON(preemptible());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	return !cpu_has_hw_af();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) #define arch_faults_on_old_pte		arch_faults_on_old_pte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  * Experimentally, it's cheap to set the access flag in hardware and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  * benefit from prefaulting mappings as 'old' to start with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static inline bool arch_wants_old_prefaulted_pte(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	return !arch_faults_on_old_pte();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) #define arch_wants_old_prefaulted_pte	arch_wants_old_prefaulted_pte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) #endif /* !__ASSEMBLY__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) #endif /* __ASM_PGTABLE_H */