// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	/*
	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
	 * once at init and only read afterwards.
	 */
	return __pgprot(pgprot_val(prot) | mio_wb_bit_mask);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
{
	/*
	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
	 * once at init and only read afterwards.
	 */
	return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);

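/*
 * Invalidate a PTE and flush the TLB, either on the local CPU only or on
 * all CPUs. With the guest-TLB facility the IPTE options are derived from
 * mm->context.gmap_asce: NODAT is allowed if no gmap ASCE is set (or the
 * caller requested it), and unless gmap_asce is -1UL the flush is bound to
 * the gmap ASCE (falling back to the mm's own ASCE).
 */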
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
	}
}

static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	}
}

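/*
 * Flush a PTE synchronously. A local IPTE is used if the machine supports
 * local TLB clearing and only the current CPU has the mm attached,
 * otherwise the invalidation is broadcast to all CPUs.
 */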
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep,
				      int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		ptep_ipte_local(mm, addr, ptep, nodat);
	else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}

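/*
 * Flush a PTE lazily: if the mm is attached to the current CPU only, just
 * mark the PTE invalid and record a pending flush in mm->context.flush_mm;
 * otherwise the TLB entry must be removed from all CPUs right away.
 */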
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}

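/*
 * The PGSTE for a PTE lives at ptep + PTRS_PER_PTE in the same page table
 * page. pgste_get_lock() acquires the PCL bit of the PGSTE with a
 * compare-and-swap loop and returns the locked value; pgste_set_unlock()
 * stores the new value with the PCL bit cleared again.
 */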
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

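/*
 * Pick up the hardware storage key of the page a (valid) PTE points to and
 * fold it into the PGSTE: the changed/referenced bits are accumulated in
 * the guest GC/GR bits, and the access key and fetch-protection bit replace
 * the ACC/FP fields.
 */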
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

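/*
 * When a new, valid PTE is installed, initialize the real storage key of
 * the page from the access key, fetch-protection bit and guest R/C state
 * saved in the PGSTE.
 */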
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

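/*
 * If invalidation or VSIE notification was requested for this PTE, clear
 * the request bits in the PGSTE and notify via ptep_notify().
 */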
static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}

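/*
 * ptep_xchg_start() and ptep_xchg_commit() bracket a PTE exchange: start
 * locks the PGSTE and handles pending notifications, commit transfers
 * storage key state between PTE and PGSTE, installs the new PTE and drops
 * the PGSTE lock. For mms without PGSTEs the new PTE is simply stored.
 */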
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
	return old;
}

pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_direct(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;
	int nodat;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	pgste_t pgste;
	struct mm_struct *mm = vma->vm_mm;

	if (!MACHINE_HAS_NX)
		pte_val(pte) &= ~_PAGE_NOEXEC;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}

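/*
 * Invalidate a segment-table (pmd) entry and flush the TLB, locally or on
 * all CPUs. If the mm has PGSTEs and 1M huge pages may be mapped into a
 * guest, the corresponding gmap shadow entries are flushed as well.
 * Machines without the IDTE facility fall back to CSP.
 */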
static inline void pmdp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
		gmap_pmdp_idte_local(mm, addr);
}

static inline void pmdp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST) {
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else if (MACHINE_HAS_IDTE) {
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else {
		__pmdp_csp(pmdp);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_csp(mm, addr);
	}
}

static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pmdp_idte_local(mm, addr, pmdp);
	else
		pmdp_idte_global(mm, addr, pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			gmap_pmdp_invalidate(mm, addr);
	} else {
		pmdp_idte_global(mm, addr, pmdp);
	}
	atomic_dec(&mm->context.flush_count);
	return old;
}

#ifdef CONFIG_PGSTE
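/*
 * Walk the page tables for a user address and allocate missing p4d, pud
 * and pmd levels on the way. Returns the pmd pointer or NULL if an
 * allocation failed.
 */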
static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	return pmd;
}
#endif

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

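/*
 * Invalidate a region-third-table (pud) entry and flush the TLB, locally
 * or on all CPUs, using IDTE where available.
 */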
static inline void pudp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
}

static inline void pudp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
	else
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use __pmdp_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
}

static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pudp_idte_local(mm, addr, pudp);
	else
		pudp_idte_global(mm, addr, pudp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
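/*
 * Deposit/withdraw a preallocated page table for a huge pmd. The pgtable
 * itself doubles as a list_head, so the deposited tables form a list
 * hanging off pmd_huge_pte(); withdraw returns them in FIFO order and
 * reinitializes the first two PTEs as invalid.
 */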
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

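/*
 * Mark a PTE so that the next invalidation triggers a gmap notification
 * (PGSTE_IN_BIT).
 */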
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p, nodat;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}

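/*
 * Create a shadow (VSIE) PTE from a parent PTE: if the parent PTE is valid
 * and does not forbid the requested access, mark the parent PGSTE with the
 * VSIE bit and install the shadow PTE with the combined protection.
 * Returns 1 if the shadow PTE was written, 0 if it was already valid and
 * -EAGAIN if the parent PTE is not usable.
 */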
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;
	int nodat;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	ptep_flush_direct(mm, saddr, ptep, nodat);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key ACC and F, but set R/C */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	bool dirty;
	int nodat;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
		ptep_ipte_global(mm, addr, ptep, nodat);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);

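/*
 * Set the guest storage key for the page at the given guest address.
 * Huge pmd mappings get the real storage key set directly; for 4K pages
 * the new key is stored in the PGSTE and, if the PTE is valid, in the
 * real storage key of the backing page. Returns 0 on success or -EFAULT
 * if the address cannot be resolved.
 */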
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) unsigned char key, bool nq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) unsigned long keyul, paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) pgste_t old, new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) pmd_t *pmdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) pmdp = pmd_alloc_map(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (unlikely(!pmdp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) ptl = pmd_lock(mm, pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (!pmd_present(*pmdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (pmd_large(*pmdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) paddr = pmd_val(*pmdp) & HPAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) paddr |= addr & ~HPAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * Huge pmds need quiescing operations, they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * always mapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) page_set_storage_key(paddr, key, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (unlikely(!ptep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) new = old = pgste_get_lock(ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) PGSTE_ACC_BITS | PGSTE_FP_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) keyul = (unsigned long) key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!(pte_val(*ptep) & _PAGE_INVALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) unsigned long bits, skey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) paddr = pte_val(*ptep) & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) skey = (unsigned long) page_get_storage_key(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /* Set storage key ACC and FP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) page_set_storage_key(paddr, skey, !nq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* Merge host changed & referenced into pgste */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) pgste_val(new) |= bits << 52;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /* Changing the guest storage key is considered a change of the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if ((pgste_val(new) ^ pgste_val(old)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) pgste_val(new) |= PGSTE_UC_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) pgste_set_unlock(ptep, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) pte_unmap_unlock(ptep, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) EXPORT_SYMBOL(set_guest_storage_key);
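
/*
 * Example (illustrative sketch, not an in-tree caller): setting one storage
 * key for an address mapped in a userspace mm. The wrapper name is
 * hypothetical; the sketch assumes the caller serializes against the mm,
 * e.g. by holding mmap_read_lock(), since the helper above walks the page
 * tables itself.
 *
 *	static int example_set_key(struct mm_struct *mm, unsigned long addr,
 *				   unsigned char key)
 *	{
 *		int rc;
 *
 *		mmap_read_lock(mm);
 *		rc = set_guest_storage_key(mm, addr, key, false);
 *		mmap_read_unlock(mm);
 *		return rc;	/* 0 on success, -EFAULT if addr is not mapped */
 *	}
 */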
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * cond_set_guest_storage_key - conditionally set a guest storage key (handling csske)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * oldkey will be updated when either mr or mc is set and a pointer is given.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * Return: 0 if the guest's storage key did not need updating, 1 if the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * storage key was updated, or -EFAULT on access errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) unsigned char key, unsigned char *oldkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) bool nq, bool mr, bool mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /* we can drop the pgste lock between getting and setting the key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (mr | mc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) rc = get_guest_storage_key(current->mm, addr, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (oldkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) *oldkey = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (!mr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) mask |= _PAGE_REFERENCED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (!mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) mask |= _PAGE_CHANGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (!((tmp ^ key) & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) rc = set_guest_storage_key(current->mm, addr, key, nq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return rc < 0 ? rc : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) EXPORT_SYMBOL(cond_set_guest_storage_key);
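
/*
 * Example (illustrative): a csske-style flow built on the helper above. The
 * wrapper is hypothetical; note that cond_set_guest_storage_key() operates
 * on current->mm internally, so the sketch passes current->mm as well. A
 * return value of 0 means no update was needed, 1 means the key was updated,
 * and a negative value is an error.
 *
 *	static int example_csske(unsigned long addr, unsigned char key,
 *				 unsigned char *oldkey, bool nq, bool mr,
 *				 bool mc)
 *	{
 *		int rc;
 *
 *		mmap_read_lock(current->mm);
 *		rc = cond_set_guest_storage_key(current->mm, addr, key,
 *						oldkey, nq, mr, mc);
 *		mmap_read_unlock(current->mm);
 *		return rc;
 *	}
 */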
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * reset_guest_reference_bit - reset a guest reference bit (rrbe), returning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * the previous reference and changed bit state as a cc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * Return: < 0 in case of error, otherwise the cc to be reported to the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) unsigned long paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) pgste_t old, new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) pmd_t *pmdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) int cc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) pmdp = pmd_alloc_map(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (unlikely(!pmdp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ptl = pmd_lock(mm, pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (!pmd_present(*pmdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (pmd_large(*pmdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) paddr = pmd_val(*pmdp) & HPAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) paddr |= addr & ~HPAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) cc = page_reset_referenced(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (unlikely(!ptep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) new = old = pgste_get_lock(ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* Reset guest reference bit only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) pgste_val(new) &= ~PGSTE_GR_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (!(pte_val(*ptep) & _PAGE_INVALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) paddr = pte_val(*ptep) & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) cc = page_reset_referenced(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /* Merge real referenced bit into host-set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /* Reflect guest's logical view, not physical */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /* Changing the guest storage key is considered a change of the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) pgste_val(new) |= PGSTE_UC_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) pgste_set_unlock(ptep, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) pte_unmap_unlock(ptep, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) EXPORT_SYMBOL(reset_guest_reference_bit);
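
/*
 * Example (illustrative): consuming the condition code returned by
 * reset_guest_reference_bit(). The wrapper is hypothetical; a negative
 * return is an access error, otherwise the value is the cc (previous
 * reference/change state) to be reported to the guest.
 *
 *	static int example_rrbe(struct mm_struct *mm, unsigned long addr)
 *	{
 *		int cc;
 *
 *		mmap_read_lock(mm);
 *		cc = reset_guest_reference_bit(mm, addr);
 *		mmap_read_unlock(mm);
 *		return cc;
 *	}
 */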
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) unsigned char *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) unsigned long paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) pgste_t pgste;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) pmd_t *pmdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) pmdp = pmd_alloc_map(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (unlikely(!pmdp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ptl = pmd_lock(mm, pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (!pmd_present(*pmdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* Not yet mapped memory has a zero key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) *key = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (pmd_large(*pmdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) paddr = pmd_val(*pmdp) & HPAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) paddr |= addr & ~HPAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) *key = page_get_storage_key(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) spin_unlock(ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (unlikely(!ptep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) pgste = pgste_get_lock(ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) *key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) paddr = pte_val(*ptep) & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (!(pte_val(*ptep) & _PAGE_INVALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) *key = page_get_storage_key(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* Reflect guest's logical view, not physical */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) *key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) pgste_set_unlock(ptep, pgste);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) pte_unmap_unlock(ptep, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) EXPORT_SYMBOL(get_guest_storage_key);
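
/*
 * Example (illustrative): a set/get round trip with the two helpers above.
 * The wrapper is hypothetical. Note that the ACC/FP part of the key reads
 * back as written, while the R/C bits reflect the guest's logical view and
 * may therefore differ from the value that was set.
 *
 *	static int example_key_roundtrip(struct mm_struct *mm,
 *					 unsigned long addr, unsigned char key,
 *					 unsigned char *readback)
 *	{
 *		int rc;
 *
 *		rc = set_guest_storage_key(mm, addr, key, false);
 *		if (rc)
 *			return rc;
 *		return get_guest_storage_key(mm, addr, readback);
 *	}
 */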
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * pgste_perform_essa - perform ESSA actions on the PGSTE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * @mm: the memory context. It must have PGSTEs, no check is performed here!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * @hva: the host virtual address of the page whose PGSTE is to be processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * @orc: the specific action to perform, see the ESSA_SET_* macros.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * @oldpte: the PTE will be saved there if the pointer is not NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * Return: 1 if the page is to be added to the CBRL, otherwise 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * or < 0 in case of error. -EINVAL is returned for invalid values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * of orc, -EFAULT for invalid addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) unsigned long *oldpte, unsigned long *oldpgste)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) unsigned long pgstev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) pgste_t pgste;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) WARN_ON_ONCE(orc > ESSA_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (unlikely(orc > ESSA_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) vma = find_vma(mm, hva);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) ptep = get_locked_pte(mm, hva, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (unlikely(!ptep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) pgste = pgste_get_lock(ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) pgstev = pgste_val(pgste);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (oldpte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) *oldpte = pte_val(*ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (oldpgste)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) *oldpgste = pgstev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) switch (orc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) case ESSA_GET_STATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) case ESSA_SET_STABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) pgstev |= _PGSTE_GPS_USAGE_STABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) case ESSA_SET_UNUSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) pgstev &= ~_PGSTE_GPS_USAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) pgstev |= _PGSTE_GPS_USAGE_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (pte_val(*ptep) & _PAGE_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) res = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) case ESSA_SET_VOLATILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) pgstev &= ~_PGSTE_GPS_USAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (pte_val(*ptep) & _PAGE_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) res = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) case ESSA_SET_POT_VOLATILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) pgstev &= ~_PGSTE_GPS_USAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (!(pte_val(*ptep) & _PAGE_INVALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (pgstev & _PGSTE_GPS_ZERO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (!(pgstev & PGSTE_GC_BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) res = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) case ESSA_SET_STABLE_RESIDENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) pgstev &= ~_PGSTE_GPS_USAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) pgstev |= _PGSTE_GPS_USAGE_STABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * Since the resident state can go away any time after this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * call, we will not make this page resident. We can revisit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * this decision if a guest will ever start using this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) case ESSA_SET_STABLE_IF_RESIDENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (!(pte_val(*ptep) & _PAGE_INVALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) pgstev &= ~_PGSTE_GPS_USAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) pgstev |= _PGSTE_GPS_USAGE_STABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) case ESSA_SET_STABLE_NODAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) pgstev &= ~_PGSTE_GPS_USAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /* we should never get here! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /* If we are discarding a page, set it to logical zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) pgstev |= _PGSTE_GPS_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) pgste_val(pgste) = pgstev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) pgste_set_unlock(ptep, pgste);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) pte_unmap_unlock(ptep, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) EXPORT_SYMBOL(pgste_perform_essa);
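
/*
 * Example (illustrative): handling one ESSA request and recording pages
 * that have to be reported in the CBRL. The cbrl buffer and the wrapper are
 * hypothetical; the return convention (1 = add to CBRL, 0 = nothing to do,
 * < 0 = error) is the one documented above.
 *
 *	static int example_essa_one(struct mm_struct *mm, unsigned long hva,
 *				    int orc, unsigned long *cbrl,
 *				    unsigned int *entries)
 *	{
 *		int res;
 *
 *		mmap_read_lock(mm);
 *		res = pgste_perform_essa(mm, hva, orc, NULL, NULL);
 *		mmap_read_unlock(mm);
 *		if (res < 0)
 *			return res;
 *		if (res == 1)
 *			cbrl[(*entries)++] = hva;
 *		return 0;
 *	}
 */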
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * set_pgste_bits - set specific PGSTE bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * @mm: the memory context. It must have PGSTEs, no check is performed here!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * @hva: the host virtual address of the page whose PGSTE is to be processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * @bits: a bitmask representing the bits that will be touched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * @value: the values of the bits to be written. Only the bits in the mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * will be written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * Return: 0 on success, < 0 in case of error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) unsigned long bits, unsigned long value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) pgste_t new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) vma = find_vma(mm, hva);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) ptep = get_locked_pte(mm, hva, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (unlikely(!ptep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) new = pgste_get_lock(ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) pgste_val(new) &= ~bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) pgste_val(new) |= value & bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) pgste_set_unlock(ptep, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) pte_unmap_unlock(ptep, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) EXPORT_SYMBOL(set_pgste_bits);
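
/*
 * Example (illustrative): rewriting only the GPS usage field of a PGSTE
 * while leaving every other bit untouched. The wrapper is hypothetical; the
 * mask and value macros are the ones used by pgste_perform_essa() above.
 *
 *	static int example_mark_unused(struct mm_struct *mm, unsigned long hva)
 *	{
 *		return set_pgste_bits(mm, hva, _PGSTE_GPS_USAGE_MASK,
 *				      _PGSTE_GPS_USAGE_UNUSED);
 *	}
 */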
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * get_pgste - get the current PGSTE for the given address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * @mm: the memory context. It must have PGSTEs, no check is performed here!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * @hva: the host virtual address of the page whose PGSTE is to be processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * @pgstep: will be written with the current PGSTE for the given address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * Return: 0 on success, < 0 in case of error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) vma = find_vma(mm, hva);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) ptep = get_locked_pte(mm, hva, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (unlikely(!ptep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) *pgstep = pgste_val(pgste_get(ptep));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) pte_unmap_unlock(ptep, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) EXPORT_SYMBOL(get_pgste);
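
/*
 * Example (illustrative): pairing get_pgste() with set_pgste_bits() to copy
 * selected PGSTE bits from one page to another. The wrapper and the choice
 * of mask are hypothetical.
 *
 *	static int example_copy_pgste_bits(struct mm_struct *mm,
 *					   unsigned long from_hva,
 *					   unsigned long to_hva,
 *					   unsigned long mask)
 *	{
 *		unsigned long pgstev;
 *		int rc;
 *
 *		rc = get_pgste(mm, from_hva, &pgstev);
 *		if (rc < 0)
 *			return rc;
 *		return set_pgste_bits(mm, to_hva, mask, pgstev & mask);
 *	}
 */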
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) #endif