/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

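/*
 * A 4KiB page table holds 512 8-byte entries in the 64-bit (and PAE)
 * format and 1024 4-byte entries in the legacy 32-bit format, hence
 * the 9 vs. 10 index bits per level below.
 */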
#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

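/*
 * With PSE-36, bits 13..16 of a 4MiB page-directory entry supply
 * physical address bits 32..35, extending the reachable physical
 * address space to 36 bits under 32-bit paging.
 */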
#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

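/*
 * Root page-table depth for each paging mode: 2 levels for legacy
 * 32-bit paging, 3 for PAE, 4 for long mode, 5 for long mode with LA57.
 */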
#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

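/*
 * Build a mask with bits [s, e] (inclusive) set; returns 0 for an
 * empty range.  E.g. rsvd_bits(3, 4) == 0x18, rsvd_bits(52, 51) == 0.
 */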
static inline u64 rsvd_bits(int s, int e)
{
	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
			     gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
			  u64 fault_address, char *insn, int insn_len);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

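/*
 * CR3 bits 0..11 hold the PCID when CR4.PCIDE = 1; with PCIDE = 0 they
 * are control bits (e.g. PWT/PCD) and the active PCID is always 0.
 * E.g. cr3 = 0x12345007 with CR4.PCIDE set yields PCID 0x007.
 */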
static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root_hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
				 vcpu->arch.mmu->shadow_root_level);
}

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		       bool prefault);

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefault)
{
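	/*
	 * With retpolines, the indirect call through ->page_fault is
	 * expensive; in the common TDP case, compare the handler and
	 * call kvm_tdp_page_fault() directly to avoid that overhead.
	 */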
#ifdef CONFIG_RETPOLINE
	if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
		return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
#endif
	return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
}

/*
 * Currently, we have two sorts of write-protection: a) the first sort
 * write-protects guest pages to sync guest modifications, b) the second
 * sort is used to sync the dirty bitmap for KVM_GET_DIRTY_LOG.  The
 * differences between the two sorts are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid
 * corrupting the shadow page table between vcpus, so it must run under
 * the protection of mmu-lock.  The second case does not need to flush
 * the TLB until the dirty bitmap is returned to userspace, since it
 * only write-protects pages logged in the bitmap; that means no page in
 * the dirty bitmap is missed, so the TLB can be flushed outside of
 * mmu-lock.
 *
 * So there is a problem: the first case can observe a corrupted TLB
 * caused by the second case, which write-protects pages without
 * flushing the TLB immediately.  To make the first case aware of this,
 * we have it flush the TLB whenever it write-protects an spte whose
 * SPTE_MMU_WRITEABLE bit is set; this works because the second case
 * never touches the SPTE_MMU_WRITEABLE bit.
 *
 * In any event, whenever an spte is updated (only permission and status
 * bits are changed), we need to check whether an spte with
 * SPTE_MMU_WRITEABLE has become read-only, and flush the TLB if so.
 * Fortunately, mmu_spte_update() already handles this perfectly.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to see whether a writable TLB entry may exist, or whether the spte
 *   can be made writable on the mmu mapping, check SPTE_MMU_WRITEABLE;
 *   this is the most common case.  Otherwise,
 * - when fixing a page fault on the spte, or when write-protecting for
 *   dirty logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}
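/*
 * Illustrative sketch of the rules above (not part of the API; the
 * callee names are hypothetical and exist only for this example):
 *
 *	// Could a writable TLB entry exist, or could the fast page
 *	// fault path still make this spte writable?
 *	if (spte & SPTE_MMU_WRITEABLE)
 *		account_possible_writable_tlb_entry();
 *
 *	// Fixing a write fault, or write-protecting for dirty logging,
 *	// cares about the hardware W bit itself.
 *	if (is_writable_pte(spte))
 *		mark_page_dirty_in_log();
 */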

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops.get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC.  We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 domains and 2
		 * attribute bits per domain in pkru.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			 ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
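/*
 * Worked example of the branchless index above (values assumed for
 * illustration): cpl = 0, EFLAGS.AC = 1, pfec = PFERR_WRITE_MASK (2).
 * Then smap = (0 - 3) & X86_EFLAGS_AC = X86_EFLAGS_AC, and
 * index = (2 >> 1) + (X86_EFLAGS_AC >> 16) = 1 + 4 = 5, i.e. the
 * write-access permission entry with the SMAP-override bit set.
 */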

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

#endif