/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_2LEVEL_H
#define _ASM_X86_PGTABLE_2LEVEL_H

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

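/*
 * With 2-level paging there is no separate PUD level (it is folded into
 * the PGD), so there is deliberately nothing to do here.
 */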
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
}

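/*
 * A 2-level PTE is a single 32-bit word, so a plain store is already
 * atomic and the _atomic flavour can simply reuse native_set_pte().
 */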
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_pmd_clear(pmd_t *pmdp)
{
	native_set_pmd(pmdp, __pmd(0));
}

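/* As with native_set_pud(), there is no PUD level to clear here. */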
static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void native_pte_clear(struct mm_struct *mm,
				    unsigned long addr, pte_t *xp)
{
	*xp = native_make_pte(0);
}

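/*
 * On SMP the entry must be fetched and cleared with a single atomic
 * xchg(), so that a concurrent hardware update of the accessed/dirty
 * bits on another CPU cannot be lost between the read and the clear.
 * On UP the non-atomic "local" variants are sufficient.
 */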
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
	return __pte(xchg(&xp->pte_low, 0));
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
	return __pmd(xchg((pmdval_t *)xp, 0));
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
	return __pud(xchg((pudval_t *)xp, 0));
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

/* Bit manipulation helper on pte/pgoff entry */
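/*
 * Purely illustrative: pte_bitop(val, 5, 0x1f, 1) extracts the 5-bit
 * field at bits 5..9 of val and re-positions it at bits 1..5.
 */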
static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
				      unsigned long mask, unsigned int leftshift)
{
	return ((value >> rightshift) & mask) << leftshift;
}

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS 5
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (_PAGE_BIT_PRESENT + 1)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (_PAGE_BIT_PRESENT + 1)) \
					 | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
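/*
 * Resulting swap PTE layout, assuming the usual x86 bit assignments
 * (_PAGE_BIT_PRESENT == 0, _PAGE_BIT_PROTNONE == _PAGE_BIT_GLOBAL == 8):
 *
 *   bit  0     : must be 0 (_PAGE_PRESENT clear, i.e. not present)
 *   bits 1..5  : swap type
 *   bits 9..31 : swap offset
 */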

/* No inverted PFNs on 2 level page tables */
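/*
 * PTE inversion is the L1TF mitigation applied to non-present PTEs; it is
 * only implemented for the PAE and 64-bit page table formats, so these
 * helpers are no-op stubs that keep the common code happy.
 */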

static inline u64 protnone_mask(u64 val)
{
	return 0;
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
{
	return val;
}

static inline bool __pte_needs_invert(u64 val)
{
	return false;
}

#endif /* _ASM_X86_PGTABLE_2LEVEL_H */