/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/* Note due to the way vm flags are laid out, the bits are XWR */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X
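
/*
 * Worked example for the tables above (illustrative only): the table index
 * is taken from the low vm_flags bits, with VM_EXEC as bit 2, VM_WRITE as
 * bit 1 and VM_READ as bit 0, hence "XWR".  A private PROT_READ|PROT_EXEC
 * mapping therefore selects __P101 (PAGE_READONLY_X), while the shared
 * variant selects __S101.  Generic mm code resolves this roughly as:
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 */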

#ifndef __ASSEMBLY__

/* Keep these as macros to avoid an include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}
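
/*
 * Minimal usage sketch of the helpers above (not part of the API; assumes
 * the caller already holds a valid struct page):
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *	pgprot_t prot = pte_pgprot(pte);	(everything except the PFN)
 *	struct page *same = pte_page(pte);	(back to the struct page)
 */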

#ifndef pmd_page_vaddr
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS));
}
#define pmd_page_vaddr pmd_page_vaddr
#endif
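
/*
 * Illustrative walk step (a sketch, not a new helper): for a present,
 * non-leaf pmd, pmd_page_vaddr() yields the kernel virtual address of the
 * PTE page it points to, which is what pte_offset_kernel() builds on:
 *
 *	pte_t *ptep = (pte_t *)pmd_page_vaddr(pmd) + pte_index(addr);
 */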
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
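
/*
 * Example (sketch): a never-written anonymous page can be serviced by
 * mapping the shared zero page read-only; on powerpc the vaddr argument
 * is ignored, as the macro above shows:
 *
 *	struct page *zp = ZERO_PAGE(addr);
 */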

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

extern unsigned long ioremap_bot;

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif

/* Resolve a vmalloc address to its physical address. Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);
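
/*
 * Usage sketch (illustrative only, assuming the vmalloc mapping is still
 * live when the translation is asked for):
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	unsigned long pa = vmalloc_to_phys(buf);
 */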

void pgtable_cache_add(unsigned int shift);

pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

/*
 * When used, PTE_FRAG_NR is defined in the subarch pgtable.h,
 * so it is guaranteed to be defined by the time we get here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif
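
/*
 * Simplified sketch of how the fragment cache is consumed (loosely modelled
 * on the pte_frag allocator; alloc_new_pte_page() and next_fragment() are
 * hypothetical placeholders, not real helpers): a spare fragment cached in
 * the mm context is handed out before a whole new page is allocated:
 *
 *	void *frag = pte_frag_get(&mm->context);
 *	if (!frag)
 *		frag = alloc_new_pte_page(mm);
 *	else
 *		pte_frag_set(&mm->context, next_fragment(frag));
 */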

#ifndef pmd_is_leaf
#define pmd_is_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
	return false;
}
#endif

#ifndef pud_is_leaf
#define pud_is_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
	return false;
}
#endif

#ifndef p4d_is_leaf
#define p4d_is_leaf p4d_is_leaf
static inline bool p4d_is_leaf(p4d_t p4d)
{
	return false;
}
#endif
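
/*
 * Illustrative use in a software page table walk (a sketch, not an
 * existing helper): the *_is_leaf() predicates let a walker stop at a
 * huge mapping instead of descending to the next level:
 *
 *	if (pmd_is_leaf(*pmdp))
 *		return pmdp;		(the pmd itself is the translation)
 *	ptep = pte_offset_kernel(pmdp, addr);
 */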

#ifdef CONFIG_PPC64
#define is_ioremap_addr is_ioremap_addr
static inline bool is_ioremap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= IOREMAP_BASE && addr < IOREMAP_END;
}
#endif /* CONFIG_PPC64 */
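
/*
 * Example check (sketch): a caller can use this to tell an ioremap()
 * mapping apart from ordinary vmalloc() space before translating the
 * pointer; handle_mmio_pointer() is a hypothetical caller:
 *
 *	if (is_ioremap_addr(p))
 *		handle_mmio_pointer(p);
 */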

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */