// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>

u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;
static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
	BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}
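
/*
 * Worked example (illustrative; the exact field widths are whatever
 * MMIO_SPTE_GEN_LOW_MASK/MMIO_SPTE_GEN_HIGH_MASK define in spte.h): the
 * MMIO generation cannot occupy one contiguous run of SPTE bits, so it
 * is split into a low field and a high field.  If the low field held
 * gen bits [8:0] and the high field the remaining bits, a generation of
 * 0x3ff would be encoded by placing bits [8:0] at
 * MMIO_SPTE_GEN_LOW_SHIFT and bit 9 at MMIO_SPTE_GEN_HIGH_SHIFT;
 * decoding reverses the two shifts and ORs the pieces back together.
 */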

u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 mask = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	access &= shadow_mmio_access_mask;
	mask |= shadow_mmio_value | access;
	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return mask;
}
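
/*
 * Example call (illustrative, not a caller in this file; mmu.c's
 * mark_mmio_spte() is the expected user), caching an emulated MMIO
 * access so later faults on the GFN are recognized without walking the
 * memslots:
 *
 *	u64 spte = make_mmio_spte(vcpu, gfn, access);
 *
 *	mmu_spte_set(sptep, spte);
 *
 * The resulting SPTE packs the MMIO marker (shadow_mmio_value), the
 * allowed access bits, the GPA, and the memslot generation, and is
 * deliberately not-present/reserved so that every access faults.
 */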

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not meant for MMIO and can be
			 * mapped with a cached memory type for better
			 * performance.  The PageReserved() check alone would
			 * misclassify those pages as MMIO and cause KVM to
			 * map them with the UC memory type, which would hurt
			 * performance.  Therefore, additionally check the
			 * host memory type and treat only UC/UC-/WC pages
			 * as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

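	/*
	 * A pfn with no struct page (!pfn_valid()) is treated as MMIO
	 * unless the raw e820 map marks the physical range as RAM, e.g.
	 * firmware-reserved RAM handed to the guest must not be mapped
	 * with an uncacheable memory type.
	 */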
	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}

int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
	      gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
	      bool can_unsync, bool host_writable, bool ad_disabled,
	      u64 *new_spte)
{
	u64 spte = 0;
	int ret = 0;

	if (ad_disabled)
		spte |= SPTE_AD_DISABLED_MASK;
	else if (kvm_vcpu_ad_need_write_protect(vcpu))
		spte |= SPTE_AD_WRPROT_ONLY_MASK;

	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
	spte |= shadow_present_mask;
	if (!speculative)
		spte |= spte_shadow_accessed_mask(spte);

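	/*
	 * Honor the iTLB multihit (NX huge pages) mitigation: refuse to
	 * create executable huge pages.  The page is mapped without
	 * execute permission here; a later execute fault is satisfied
	 * with a smaller, executable mapping instead.
	 */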
	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled()) {
		pte_access &= ~ACC_EXEC_MASK;
	}

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (host_writable)
		spte |= SPTE_HOST_WRITEABLE;
	else
		pte_access &= ~ACC_WRITE_MASK;

	if (!kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_mask;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {
		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;

		/*
		 * Optimization: for pte sync, if the spte was writable then
		 * the hash lookup is unnecessary (and expensive).  Write
		 * protection is the responsibility of mmu_get_page() /
		 * kvm_sync_page().  The same reasoning applies to dirty
		 * page accounting.
		 */
		if (!can_unsync && is_writable_pte(old_spte))
			goto out;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			ret |= SET_SPTE_WRITE_PROTECTED_PT;
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		spte |= spte_shadow_dirty_mask(spte);

	if (speculative)
		spte = mark_spte_for_access_track(spte);

out:
	*new_spte = spte;
	return ret;
}

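/*
 * Example call (illustrative; in mmu.c, set_spte() is the expected
 * caller, passing the shadow page's AD-disabled state and the current
 * SPTE value):
 *
 *	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep,
 *			speculative, can_unsync, host_writable,
 *			sp_ad_disabled(sp), &spte);
 *
 * A non-zero return carries SET_SPTE_* flags for the caller to act on
 * (e.g. flush TLBs after write protection); the SPTE itself is returned
 * via @new_spte.
 */

/*
 * Build a non-leaf SPTE that points at a child page table.  Non-leaf
 * SPTEs always grant full RWX-user permissions; restrictions are
 * applied at the leaf level.
 */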
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
	u64 spte;

	spte = __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
	       shadow_user_mask | shadow_x_mask | shadow_me_mask;

	if (ad_disabled)
		spte |= SPTE_AD_DISABLED_MASK;
	else
		spte |= shadow_accessed_mask;

	return spte;
}

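/*
 * Build the replacement for an existing leaf SPTE when the primary MMU
 * moves the backing page (mmu_notifier change_pte): keep the old
 * attributes but point at the new pfn, drop host- and guest-writability
 * so the new page cannot be dirtied without a fault, and mark the SPTE
 * for access tracking so the next access is noticed.
 */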
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
	u64 new_spte;

	new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
	new_spte |= (u64)new_pfn << PAGE_SHIFT;

	new_spte &= ~PT_WRITABLE_MASK;
	new_spte &= ~SPTE_HOST_WRITEABLE;

	new_spte = mark_spte_for_access_track(new_spte);

	return new_spte;
}
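
/*
 * Worked example (illustrative): if old_spte mapped the old pfn with
 * RWX and both writable bits set, the result maps new_pfn with
 * PT_WRITABLE_MASK and SPTE_HOST_WRITEABLE cleared and is converted to
 * access-tracked (non-present) form, so the next read or execute takes
 * a fault KVM can observe, and a write cannot be restored by the fast
 * page fault path (SPTE_HOST_WRITEABLE is gone).
 */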

static u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are
	 * detected in CPU detection code, but the processor treats those
	 * reduced bits as 'keyID', thus they are not reserved bits.
	 * Therefore KVM needs to look at the physical address bits reported
	 * by CPUID.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM
	 * with custom CPUID.  Proceed with whatever the kernel found since
	 * these features aren't virtualizable (SME/SEV also require CPUIDs
	 * higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}
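
/*
 * Worked example (illustrative): CPUID leaf 0x80000008 reports
 * MAXPHYADDR in EAX[7:0].  On a host with SME enabled, the kernel may
 * have reduced boot_cpu_data.x86_phys_bits (say, from 48) to carve out
 * the keyID/C-bit space, but the MMU does not treat those bits as
 * reserved, so shadow_phys_bits must report the CPUID value (48), not
 * the reduced one.
 */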

u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	/*
	 * Making an Access Tracking PTE will result in removal of write
	 * access from the PTE.  So, verify that we will be able to restore
	 * the write access in the fast page fault path later on.
	 */
	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
		  !spte_can_locklessly_be_made_writable(spte),
		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}
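
/*
 * Worked example (illustrative): for an EPT SPTE without A/D bits, the
 * bits covered by SHADOW_ACC_TRACK_SAVED_BITS_MASK (the R/X permission
 * bits) are copied into ignored high bits at
 * SHADOW_ACC_TRACK_SAVED_BITS_SHIFT, then shadow_acc_track_mask is
 * cleared so the entry no longer permits any access.  The next access
 * faults, and the fast page fault path restores the saved permissions
 * by shifting them back down.
 */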

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN));
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
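
/*
 * Example (illustrative): with EPT enabled, VMX picks an mmio_value
 * that makes the entry an EPT misconfiguration (write+execute but not
 * read), so MMIO SPTE hits exit with EPT_MISCONFIG instead of a normal
 * EPT violation, e.g.:
 *
 *	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0);
 */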

/*
 * Sets the shadow PTE masks used by the MMU.
 *
 * Assumptions:
 *  - Setting either @accessed_mask or @dirty_mask requires setting both
 *  - At least one of @accessed_mask or @acc_track_mask must be set
 */
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask)
{
	BUG_ON(!dirty_mask != !accessed_mask);
	BUG_ON(!accessed_mask && !acc_track_mask);
	BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);

	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
	shadow_present_mask = p_mask;
	shadow_acc_track_mask = acc_track_mask;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
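
/*
 * Example (illustrative): the EPT caller in vmx.c passes roughly
 *
 *	kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
 *		enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
 *		enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
 *		0ull, VMX_EPT_EXECUTABLE_MASK,
 *		cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
 *		VMX_EPT_RWX_MASK, 0ull);
 *
 * i.e. there is no NX bit (EPT denies execute by clearing X instead),
 * and the present mask collapses to 0 when exec-only mappings are
 * supported.
 */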

void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;

	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
	shadow_dirty_mask = 0;
	shadow_nx_mask = 0;
	shadow_x_mask = 0;
	shadow_present_mask = 0;
	shadow_acc_track_mask = 0;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or fewer physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks.  Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID.  Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of the legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}
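
/*
 * Worked example (illustrative, assuming SHADOW_NONPRESENT_OR_RSVD_MASK_LEN
 * is 5): on an L1TF-vulnerable CPU with x86_cache_bits == 46,
 * low_phys_bits becomes 41 and shadow_nonpresent_or_rsvd_mask covers
 * physical address bits 45:41.  make_mmio_spte() then sets bits 45:41
 * of every MMIO SPTE to all ones (pointing above any cacheable memory,
 * defeating L1TF) and stashes the displaced GPA bits 45:41 five bits
 * higher, at 50:46, so the GPA can still be recovered from the SPTE.
 */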