/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

#include <asm/atomic64_32.h>

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))
/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
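
/*
 * Illustrative sketch (editorial, not in the original header): writing
 * the halves in the opposite order would be unsafe, because the MMU
 * could walk the entry between the two stores and see the present bit
 * set while the high word still holds stale physical-address bits:
 *
 *	ptep->pte_low  = pte.pte_low;	<- P bit visible, high word stale
 *	ptep->pte_high = pte.pte_high;
 *
 * Writing the high word first, with smp_wmb() ordering the two stores,
 * guarantees the entry is never observable in that torn state.
 */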

#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
 * a "*pmdp" dereference done by GCC. Problem is, in certain places
 * where pte_offset_map_lock() is called, concurrent page faults are
 * allowed, if the mmap_lock is held for reading. An example is mincore
 * vs page faults vs MADV_DONTNEED. On the page fault side
 * pmd_populate() rightfully does a set_64bit(), but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
 * because GCC will not read the 64-bit value of the pmd atomically.
 *
 * To fix this, all places that run pte_offset_map_lock() while holding
 * the mmap_lock in read mode must read *pmdp through this function, to
 * learn whether the pmd is null or not, and in turn whether they may
 * run pte_offset_map_lock() or pmd_trans_huge() or other pmd
 * operations.
 *
 * Without THP, if the mmap_lock is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic() runs. So
 * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_lock is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic(). We could read it truly
 * atomically here with an atomic64_read() for the THP enabled case (and
 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
 * only return an atomic pmdval if the low part of the pmdval is later
 * found to be stable (i.e. pointing to a pte). We are also returning a
 * 'none' (zero) pmdval if the low part of the pmd is zero.
 *
 * In some cases the high and low part of the pmdval returned may not be
 * consistent if THP is enabled (the low part may point to a previously
 * mapped hugepage, while the high part may point to a more recently
 * mapped hugepage), but pmd_none_or_trans_huge_or_clear_bad() only
 * needs the low part of the pmd to be read atomically to decide if the
 * pmd is unstable or not, with the only exception when the low part
 * of the pmd is zero, in which case we return a 'none' pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * If the low part is null, we must not read the high part
		 * or we can end up with a partial pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}
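
/*
 * Usage sketch (editorial, not from the original file): a reader that
 * holds the mmap_lock for read samples the pmd once through
 * pmd_read_atomic() instead of dereferencing pmdp directly:
 *
 *	pmd_t pmdval = pmd_read_atomic(pmdp);
 *
 *	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
 *		return;		<- unstable: don't walk the pte level
 *	pte = pte_offset_map_lock(mm, pmdp, address, &ptl);
 *
 * The smp_rmb() above orders the low-word read before the high-word
 * read; it pairs with write-side ordering such as the smp_wmb() in
 * native_pmd_clear() below.
 */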

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
#endif
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}
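
/*
 * Editorial note: these clears mirror native_set_pte() above. The
 * present bit lives in the low word, so it is the first half to be
 * cleared and the last half to be set, and the entry is never
 * observable as present with a stale high word. The smp_wmb() pairs
 * with the smp_rmb() in pmd_read_atomic(), which reads the low word
 * first and never reads the high word when the low word is zero.
 */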

static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called are either
	 * followed by flush_tlb_mm() or don't need a TLB flush (x86_64
	 * code or pud_clear_bad()), so we don't need a TLB flush here.
	 */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};

#ifdef CONFIG_SMP
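/*
 * Editorial note: only the low word needs the atomic xchg() below. It
 * holds the present bit as well as the accessed/dirty bits, so once it
 * has been cleared atomically the hardware will neither walk the entry
 * nor update it, and the high word can be read and cleared with plain
 * stores afterwards.
 */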
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before the clearing of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	/*
	 * If the pmd being established has the present bit cleared, we
	 * can get away without an expensive cmpxchg64: we can update
	 * pmdp half by half without racing with anybody.
	 */
	if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
		union split_pmd old, new, *ptr;

		ptr = (union split_pmd *)pmdp;

		new.pmd = pmd;

		/* xchg acts as a barrier before the setting of the high bits */
		old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
		old.pmd_high = ptr->pmd_high;
		ptr->pmd_high = new.pmd_high;
		return old.pmd;
	}

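	/*
	 * Editorial note: a present pmd can have its accessed/dirty bits
	 * set by the hardware at any time, so the whole 64-bit value
	 * must be swapped atomically; retry if the entry changed under
	 * us between the read and the cmpxchg64().
	 */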
	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);

	return old;
}
#endif

#ifdef CONFIG_SMP
union split_pud {
	struct {
		u32 pud_low;
		u32 pud_high;
	};
	pud_t pud;
};

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
	union split_pud res, *orig = (union split_pud *)pudp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
#endif

	/* xchg acts as a barrier before the clearing of the high bits */
	res.pud_low = xchg(&orig->pud_low, 0);
	res.pud_high = orig->pud_high;
	orig->pud_high = 0;

	return res.pud;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
#define __swp_type(x)			((x).val & ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_TYPE_BITS)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << SWP_TYPE_BITS})

/*
 * Normally, __swp_entry() converts from an arch-independent swp_entry_t to
 * the arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the
 * result in the pte. But here we have a 32-bit swp_entry_t and a 64-bit
 * pte, and need to use the whole 64 bits. Thus, we shift the "real"
 * arch-dependent conversion to __swp_entry_to_pte() through the following
 * helper macro based on a 64-bit __swp_entry().
 */
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })

#define __swp_entry_to_pte(x)	((pte_t){ .pte = \
		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
/*
 * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
 * swp_entry_t, but also has to convert it from 64-bit to the 32-bit
 * intermediate representation, using the following macros based on the
 * 64-bit __swp_type() and __swp_offset().
 */
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))

#define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
					     __pteval_swp_offset(pte)))
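
/*
 * Worked example (editorial, assuming _PAGE_BIT_PROTNONE is bit 8 as on
 * current x86, so SWP_OFFSET_FIRST_BIT == 9 and SWP_OFFSET_SHIFT == 14):
 * __swp_pteval_entry(type, offset) places the type in pte bits 63..59
 * and the *bitwise-inverted* offset in bits 58..9, leaving bits 8..0,
 * including _PAGE_PRESENT, clear. Inverting the offset sets the high
 * physical-address bits of the non-present pte for ordinary (small)
 * offsets, so a speculative L1TF access misses usable memory, in the
 * same spirit as <asm/pgtable-invert.h> included below.
 * __pteval_swp_offset() undoes this by inverting again before shifting
 * the offset back down.
 */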

#include <asm/pgtable-invert.h>

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */