// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>

#include "trace.h"

static struct kvm_pgtable *hyp_pgtable;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static unsigned long io_map_base;


/*
 * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
 * we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
 * CONFIG_LOCKUP_DETECTOR, CONFIG_LOCKDEP. Additionally, holding the lock too
 * long will also starve other vCPUs. We also have to make sure that the page
 * tables are not freed while we release the lock.
 */
static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
			      phys_addr_t end,
			      int (*fn)(struct kvm_pgtable *, u64, u64),
			      bool resched)
{
	int ret;
	u64 next;

	do {
		struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
		if (!pgt)
			return -EINVAL;

		next = stage2_pgd_addr_end(kvm, addr, end);
		ret = fn(pgt, addr, next - addr);
		if (ret)
			break;

		if (resched && next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (addr = next, addr != end);

	return ret;
}

#define stage2_apply_range_resched(kvm, addr, end, fn)			\
	stage2_apply_range(kvm, addr, end, fn, true)

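/*
 * Dirty logging is active for a memslot when userspace has attached a
 * dirty bitmap to it and the slot is not read-only.
 */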
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm:	pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
}

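/* A PFN with no backing struct page (!pfn_valid) is treated as device memory. */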
static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_valid(pfn);
}

static void *stage2_memcache_zalloc_page(void *arg)
{
	struct kvm_mmu_memory_cache *mc = arg;

	/* Allocated with __GFP_ZERO, so no need to zero */
	return kvm_mmu_memory_cache_alloc(mc);
}

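/*
 * Thin wrappers around the host's mm primitives, used as the mm_ops
 * callbacks of the stage-2 page-table code (see kvm_s2_mm_ops below).
 */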
static void *kvm_host_zalloc_pages_exact(size_t size)
{
	return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}

static void kvm_host_get_page(void *addr)
{
	get_page(virt_to_page(addr));
}

static void kvm_host_put_page(void *addr)
{
	put_page(virt_to_page(addr));
}

static int kvm_host_page_count(void *addr)
{
	return page_count(virt_to_page(addr));
}

static phys_addr_t kvm_host_pa(void *addr)
{
	return __pa(addr);
}

static void *kvm_host_va(phys_addr_t phys)
{
	return __va(phys);
}

/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM. However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we flush to make sure the IO subsystem will
 * never hit in the cache.
 *
 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
 * we then fully enforce cacheability of RAM, no matter what the guest
 * does.
 */
/**
 * __unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @mmu:   The KVM stage-2 MMU pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 * @may_block: Whether or not we are permitted to block
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
				 bool may_block)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	phys_addr_t end = start + size;

	assert_spin_locked(&kvm->mmu_lock);
	WARN_ON(size & ~PAGE_MASK);
	WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
				   may_block));
}

static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
{
	__unmap_stage2_range(mmu, start, size, true);
}

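/* Apply kvm_pgtable_stage2_flush() to everything mapped at stage 2 for this memslot. */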
static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;

	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
static void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 */
void free_hyp_pgds(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);
	if (hyp_pgtable) {
		kvm_pgtable_hyp_destroy(hyp_pgtable);
		kfree(hyp_pgtable);
		hyp_pgtable = NULL;
	}
	mutex_unlock(&kvm_hyp_pgd_mutex);
}

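/*
 * Returns true when the host kernel still manages the hyp page tables
 * itself, i.e. protected KVM has not (yet) taken ownership of them.
 */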
static bool kvm_host_owns_hyp_mappings(void)
{
	if (static_branch_likely(&kvm_protected_mode_initialized))
		return false;

	/*
	 * This can happen at boot time when __create_hyp_mappings() is called
	 * after the hyp protection has been enabled, but the static key has
	 * not been flipped yet.
	 */
	if (!hyp_pgtable && is_protected_kvm_enabled())
		return false;

	WARN_ON(!hyp_pgtable);

	return true;
}

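/*
 * Map [start, start + size) to @phys in the hyp page tables. When the host
 * no longer owns those tables, the mapping is requested from the hypervisor
 * via the __pkvm_create_mappings hypercall instead.
 */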
static int __create_hyp_mappings(unsigned long start, unsigned long size,
				 unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	if (!kvm_host_owns_hyp_mappings()) {
		return kvm_call_hyp_nvhe(__pkvm_create_mappings,
					 start, size, phys, prot);
	}

	mutex_lock(&kvm_hyp_pgd_mutex);
	err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
	mutex_unlock(&kvm_hyp_pgd_mutex);

	return err;
}

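/* Translate a kernel VA (either in the linear map or in vmalloc space) to a PA. */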
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 * @prot:	The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = kern_hyp_va((unsigned long)from);
	unsigned long end = kern_hyp_va((unsigned long)to);

	if (is_kernel_in_hyp_mode())
		return 0;

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
					    prot);
		if (err)
			return err;
	}

	return 0;
}

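/*
 * Carve a private hyp VA range out of the IO map area (allocated downwards,
 * below the idmap page) and map @size bytes of @phys_addr there with @prot,
 * returning the resulting hyp VA in @haddr.
 */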
static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
					unsigned long *haddr,
					enum kvm_pgtable_prot prot)
{
	unsigned long base;
	int ret = 0;

	if (!kvm_host_owns_hyp_mappings()) {
		base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
					 phys_addr, size, prot);
		if (IS_ERR_OR_NULL((void *)base))
			return PTR_ERR((void *)base);
		*haddr = base;

		return 0;
	}

	mutex_lock(&kvm_hyp_pgd_mutex);

	/*
	 * This assumes that we have enough space below the idmap
	 * page to allocate our VAs. If not, the check below will
	 * kick. A potential alternative would be to detect that
	 * overflow and switch to an allocation above the idmap.
	 *
	 * The allocated size is always a multiple of PAGE_SIZE.
	 */
	size = PAGE_ALIGN(size + offset_in_page(phys_addr));
	base = io_map_base - size;

	/*
	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
	 * allocating the new area, as it would indicate we've
	 * overflowed the idmap/IO address range.
	 */
	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
		ret = -ENOMEM;
	else
		io_map_base = base;

	mutex_unlock(&kvm_hyp_pgd_mutex);

	if (ret)
		goto out;

	ret = __create_hyp_mappings(base, size, phys_addr, prot);
	if (ret)
		goto out;

	*haddr = base + offset_in_page(phys_addr);
out:
	return ret;
}

/**
 * create_hyp_io_mappings - Map IO into both kernel and HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @kaddr:	Kernel VA for this mapping
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr)
{
	unsigned long addr;
	int ret;

	*kaddr = ioremap(phys_addr, size);
	if (!*kaddr)
		return -ENOMEM;

	if (is_kernel_in_hyp_mode()) {
		*haddr = *kaddr;
		return 0;
	}

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_DEVICE);
	if (ret) {
		iounmap(*kaddr);
		*kaddr = NULL;
		*haddr = NULL;
		return ret;
	}

	*haddr = (void __iomem *)addr;
	return 0;
}

/**
 * create_hyp_exec_mappings - Map an executable range into HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr)
{
	unsigned long addr;
	int ret;

	BUG_ON(is_kernel_in_hyp_mode());

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_EXEC);
	if (ret) {
		*haddr = NULL;
		return ret;
	}

	*haddr = (void *)addr;
	return 0;
}

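/* mm_ops callbacks handed to the stage-2 page-table code (see kvm_init_stage2_mmu). */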
static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
	.zalloc_page		= stage2_memcache_zalloc_page,
	.zalloc_pages_exact	= kvm_host_zalloc_pages_exact,
	.free_pages_exact	= free_pages_exact,
	.get_page		= kvm_host_get_page,
	.put_page		= kvm_host_put_page,
	.page_count		= kvm_host_page_count,
	.phys_to_virt		= kvm_host_va,
	.virt_to_phys		= kvm_host_pa,
};

/**
 * kvm_init_stage2_mmu - Initialise an S2 MMU structure
 * @kvm:	The pointer to the KVM structure
 * @mmu:	The pointer to the s2 MMU structure
 *
 * Allocates only the stage-2 HW PGD level table(s).
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
{
	int cpu, err;
	struct kvm_pgtable *pgt;

	if (mmu->pgt != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
	if (!pgt)
		return -ENOMEM;

	err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops);
	if (err)
		goto out_free_pgtable;

	mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
	if (!mmu->last_vcpu_ran) {
		err = -ENOMEM;
		goto out_destroy_pgtable;
	}

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

	mmu->arch = &kvm->arch;
	mmu->pgt = pgt;
	mmu->pgd_phys = __pa(pgt->pgd);
	mmu->vmid.vmid_gen = 0;
	return 0;

out_destroy_pgtable:
	kvm_pgtable_stage2_destroy(pgt);
out_free_pgtable:
	kfree(pgt);
	return err;
}

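/* Unmap, at stage 2, every non-PFNMAP VMA range that overlaps this memslot. */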
static void stage2_unmap_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	hva_t hva = memslot->userspace_addr;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = PAGE_SIZE * memslot->npages;
	hva_t reg_end = hva + size;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (!(vma->vm_flags & VM_PFNMAP)) {
			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
			unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
		}
		hva = vm_end;
	} while (hva < reg_end);
}

/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(current->mm);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	mmap_read_unlock(current->mm);
	srcu_read_unlock(&kvm->srcu, idx);
}

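/*
 * Tear down a VM's stage-2 page tables: detach them from the MMU while
 * holding mmu_lock, then destroy and free them outside of the lock.
 */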
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	struct kvm_pgtable *pgt = NULL;

	spin_lock(&kvm->mmu_lock);
	pgt = mmu->pgt;
	if (pgt) {
		mmu->pgd_phys = 0;
		mmu->pgt = NULL;
		free_percpu(mmu->last_vcpu_ran);
	}
	spin_unlock(&kvm->mmu_lock);

	if (pgt) {
		kvm_pgtable_stage2_destroy(pgt);
		kfree(pgt);
	}
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 * @writable:	Whether or not to create a writable mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable)
{
	phys_addr_t addr;
	int ret = 0;
	struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
	struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
				     KVM_PGTABLE_PROT_R |
				     (writable ? KVM_PGTABLE_PROT_W : 0);

	size += offset_in_page(guest_ipa);
	guest_ipa &= PAGE_MASK;

	for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
		ret = kvm_mmu_topup_memory_cache(&cache,
						 kvm_mmu_cache_min_pages(kvm));
		if (ret)
			break;

		spin_lock(&kvm->mmu_lock);
		ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
					     &cache);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			break;

		pa += PAGE_SIZE;
	}

	kvm_mmu_free_memory_cache(&cache);
	return ret;
}

/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @mmu:	The KVM stage-2 MMU pointer
 * @addr:	Start address of range
 * @end:	End address of range
 */
static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
}

/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm:	The KVM pointer
 * @slot:	The memory slot to write protect
 *
 * Called to start logging dirty pages when the KVM_MEM_LOG_DIRTY_PAGES flag
 * is set on a memory region. After this function returns, all present PUD,
 * PMD and PTE entries in the memory region are write protected, and the
 * dirty page log can then be read.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start, end;

	if (WARN_ON_ONCE(!memslot))
		return;

	start = memslot->base_gfn << PAGE_SHIFT;
	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(&kvm->arch.mmu, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}

/**
 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs. The
 * caller must hold kvm_mmu_lock.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	stage2_wp_range(&kvm->arch.mmu, start, end);
}

/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * dirty pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

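/* Thin wrappers around the low-level guest-page cache maintenance helpers. */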
static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	__clean_dcache_guest_page(pfn, size);
}

static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	__invalidate_icache_guest_page(pfn, size);
}

static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}

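/*
 * Returns true if a block mapping of @map_size bytes can be used for the
 * fault at @hva: the userspace and IPA views must be equally aligned to the
 * block size, and the block must not spill outside the memslot.
 */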
static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
					       unsigned long hva,
					       unsigned long map_size)
{
	gpa_t gpa_start;
	hva_t uaddr_start, uaddr_end;
	size_t size;

	/* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
	if (map_size == PAGE_SIZE)
		return true;

	size = memslot->npages * PAGE_SIZE;

	gpa_start = memslot->base_gfn << PAGE_SHIFT;

	uaddr_start = memslot->userspace_addr;
	uaddr_end = uaddr_start + size;

	/*
	 * Pages belonging to memslots that don't have the same alignment
	 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
	 * PMD/PUD entries, because we'll end up mapping the wrong pages.
	 *
	 * Consider a layout like the following:
	 *
	 *    memslot->userspace_addr:
	 *    +-----+--------------------+--------------------+---+
	 *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
	 *    +-----+--------------------+--------------------+---+
	 *
	 *    memslot->base_gfn << PAGE_SHIFT:
	 *      +---+--------------------+--------------------+-----+
	 *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
	 *      +---+--------------------+--------------------+-----+
	 *
	 * If we create those stage-2 blocks, we'll end up with this incorrect
	 * mapping:
	 *   d -> f
	 *   e -> g
	 *   f -> h
	 */
	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	/*
	 * Next, let's make sure we're not trying to map anything not covered
	 * by the memslot. This means we have to prohibit block size mappings
	 * for the beginning and end of a non-block aligned and non-block sized
	 * memory slot (illustrated by the head and tail parts of the
	 * userspace view above containing pages 'abcde' and 'xyz',
	 * respectively).
	 *
	 * Note that it doesn't matter if we do the check using the
	 * userspace_addr or the base_gfn, as both are equally aligned (per
	 * the check above) and equally sized.
	 */
	return (hva & ~(map_size - 1)) >= uaddr_start &&
	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * Check if the given hva is backed by a transparent huge page (THP) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * whether it can be mapped using block mapping in stage2. If so, adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * supported. This will need to be updated to support other THP sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * Returns the size of the mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) unsigned long hva, kvm_pfn_t *pfnp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) phys_addr_t *ipap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) kvm_pfn_t pfn = *pfnp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * Make sure the adjustment is done only for THP pages. Also make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * sure that the HVA and IPA are sufficiently aligned and that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * block map is contained within the memslot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (kvm_is_transparent_hugepage(pfn) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * The address we faulted on is backed by a transparent huge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * page. However, because we map the compound huge page and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * not the individual tail page, we need to transfer the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * refcount to the head page. We have to be careful that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * THP doesn't start to split while we are adjusting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * refcounts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * We are sure this doesn't happen, because mmu_notifier_retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * was successful and we are holding the mmu_lock, so if this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * THP is trying to split, it will be blocked in the mmu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * notifier before touching any of the pages, specifically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * before being able to call __split_huge_page_refcount().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * We can therefore safely transfer the refcount from PG_tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * to PG_head and switch the pfn from a tail page to the head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * page accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) */
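/*
 * Align both the IPA and the pfn down to the start of the PMD-sized
 * block: a PMD maps PTRS_PER_PMD pages (512 with 4K pages), so
 * clearing the low bits of the pfn yields the head page.
 */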
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) *ipap &= PMD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) kvm_release_pfn_clean(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) pfn &= ~(PTRS_PER_PMD - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) kvm_get_pfn(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) *pfnp = pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return PMD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* Use page mapping if we cannot use block mapping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct kvm_memory_slot *memslot, unsigned long hva,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) unsigned long fault_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) bool write_fault, writable, force_pte = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) bool exec_fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) bool device = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) unsigned long mmu_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct kvm *kvm = vcpu->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) short vma_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) gfn_t gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) kvm_pfn_t pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) bool logging_active = memslot_is_logging(memslot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) unsigned long vma_pagesize, fault_granule;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct kvm_pgtable *pgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
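/*
 * fault_granule is the size of the mapping at the level where the fault
 * was taken, e.g. PAGE_SIZE for a level 3 fault and PMD_SIZE for a
 * level 2 fault (with 4K pages).
 */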
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) write_fault = kvm_is_write_fault(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) VM_BUG_ON(write_fault && exec_fault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) kvm_err("Unexpected L2 read permission error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* Let's check if we will get back a huge page backed by hugetlbfs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) mmap_read_lock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) vma = find_vma_intersection(current->mm, hva, hva + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (unlikely(!vma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) mmap_read_unlock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (is_vm_hugetlb_page(vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) vma_shift = huge_page_shift(hstate_vma(vma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) vma_shift = PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (logging_active ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) (vma->vm_flags & VM_PFNMAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) force_pte = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) vma_shift = PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
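/*
 * Downgrade the mapping size until the block at vma_shift granularity
 * is fully contained within the memslot. The CONT_* sizes fall back to
 * their base sizes, as the contiguous hint is not used for stage-2
 * mappings here.
 */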
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) switch (vma_shift) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) #ifndef __PAGETABLE_PMD_FOLDED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) case PUD_SHIFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) case CONT_PMD_SHIFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) vma_shift = PMD_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) case PMD_SHIFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) case CONT_PTE_SHIFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) vma_shift = PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) force_pte = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) case PAGE_SHIFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) vma_pagesize = 1UL << vma_shift;
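/*
 * For a block mapping, align the faulting IPA down to the block
 * boundary so that the new stage-2 entry covers the whole block.
 */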
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) fault_ipa &= ~(vma_pagesize - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) gfn = fault_ipa >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) mmap_read_unlock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * Permission faults just need to update the existing leaf entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * and so normally don't require allocations from the memcache. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * only exception to this is when dirty logging is enabled at runtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * and a write fault needs to collapse a block entry into a table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (fault_status != FSC_PERM || (logging_active && write_fault)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ret = kvm_mmu_topup_memory_cache(memcache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) kvm_mmu_cache_min_pages(kvm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) mmu_seq = vcpu->kvm->mmu_notifier_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * Ensure the read of mmu_notifier_seq happens before we call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * the page we just got a reference to getting unmapped before we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * a chance to grab the mmu_lock, which ensures that if the page gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * unmapped afterwards, the call to kvm_unmap_hva will take it away
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * from us again properly. This smp_rmb() interacts with the smp_wmb()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * in kvm_mmu_notifier_invalidate_<page|range_end>.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (pfn == KVM_PFN_ERR_HWPOISON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) kvm_send_hwpoison_signal(hva, vma_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (is_error_noslot_pfn(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
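/*
 * A pfn without a struct page behind it (e.g. MMIO exposed via a
 * VM_PFNMAP mapping) is treated as device memory: it is mapped with
 * device attributes and never as a block.
 */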
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (kvm_is_device_pfn(pfn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) device = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) force_pte = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) } else if (logging_active && !write_fault) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * Only actually map the page as writable if this was a write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) writable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (exec_fault && device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return -ENOEXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) spin_lock(&kvm->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) pgt = vcpu->arch.hw_mmu->pgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (mmu_notifier_retry(kvm, mmu_seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * If we are not forced to use page mapping, check if we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * backed by a THP and thus use block mapping if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (vma_pagesize == PAGE_SIZE && !force_pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) vma_pagesize = transparent_hugepage_adjust(memslot, hva,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) &pfn, &fault_ipa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (writable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) prot |= KVM_PGTABLE_PROT_W;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
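/*
 * For a new mapping of normal memory, clean the data cache so that the
 * guest doesn't observe stale data if it accesses the page with its
 * caches disabled.
 */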
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (fault_status != FSC_PERM && !device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) clean_dcache_guest_page(pfn, vma_pagesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (exec_fault) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) prot |= KVM_PGTABLE_PROT_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) invalidate_icache_guest_page(pfn, vma_pagesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
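/*
 * Device memory gets the device attributes. For normal memory,
 * FEAT_DIC (ARM64_HAS_CACHE_DIC) means no I-cache invalidation is
 * needed for instruction to data coherence, so the page can be mapped
 * executable up front, avoiding a later exec fault.
 */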
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) prot |= KVM_PGTABLE_PROT_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) prot |= KVM_PGTABLE_PROT_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * If we have taken a FSC_PERM fault, we only need to relax the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * permissions when vma_pagesize equals fault_granule. Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * kvm_pgtable_stage2_map() should be called to change the block size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) __pfn_to_phys(pfn), prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) memcache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /* Mark the page dirty only if the fault is handled successfully */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (writable && !ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) kvm_set_pfn_dirty(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) mark_page_dirty(kvm, gfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) spin_unlock(&kvm->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) kvm_set_pfn_accessed(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) kvm_release_pfn_clean(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return ret != -EAGAIN ? ret : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /* Resolve the access fault by making the page young again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) pte_t pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) kvm_pte_t kpte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct kvm_s2_mmu *mmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) trace_kvm_access_fault(fault_ipa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) spin_lock(&vcpu->kvm->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) mmu = vcpu->arch.hw_mmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) spin_unlock(&vcpu->kvm->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) pte = __pte(kpte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (pte_valid(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) kvm_set_pfn_accessed(pte_pfn(pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * kvm_handle_guest_abort - handles all 2nd stage aborts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * @vcpu: the VCPU pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * Any abort that gets to the host is almost guaranteed to be caused by a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * missing second stage translation table entry, which can mean either that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * the guest simply needs more memory and we must allocate an appropriate page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * or that the guest tried to access I/O memory, which is emulated by user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * space. The distinction is based on the IPA causing the fault and whether this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * memory region has been registered as standard RAM by user space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) unsigned long fault_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) phys_addr_t fault_ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct kvm_memory_slot *memslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) unsigned long hva;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) bool is_iabt, write_fault, writable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) gfn_t gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) int ret, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /* Synchronous External Abort? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (kvm_vcpu_abt_issea(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * For RAS the host kernel may handle this abort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * There is no need to pass the error into the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) kvm_inject_vabt(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) kvm_vcpu_get_hfar(vcpu), fault_ipa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* Check that the stage-2 fault is a translation, permission or access fault */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) fault_status != FSC_ACCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) kvm_vcpu_trap_get_class(vcpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) (unsigned long)kvm_vcpu_get_esr(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) idx = srcu_read_lock(&vcpu->kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) gfn = fault_ipa >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) memslot = gfn_to_memslot(vcpu->kvm, gfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) write_fault = kvm_is_write_fault(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * The guest has put either its instructions or its page-tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * somewhere it shouldn't have. Userspace won't be able to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * anything about this (there's no syndrome for a start), so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * re-inject the abort back into the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (is_iabt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ret = -ENOEXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (kvm_vcpu_abt_iss1tw(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * Check for a cache maintenance operation. Since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * ended-up here, we know it is outside of any memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * slot. But we can't find out if that is for a device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * or if the guest is just being stupid. The only thing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * we know for sure is that this range cannot be cached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * So let's assume that the guest is just being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * cautious, and skip the instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) kvm_incr_pc(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * The IPA is reported as [MAX:12], so we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * complement it with the bottom 12 bits from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * faulting VA. This is always 12 bits, irrespective
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * of the page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) ret = io_mem_abort(vcpu, fault_ipa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /* Userspace should not be able to register out-of-bounds IPAs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (fault_status == FSC_ACCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) handle_access_fault(vcpu, fault_ipa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (ret == -ENOEXEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) srcu_read_unlock(&vcpu->kvm->srcu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static int handle_hva_to_gpa(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) int (*handler)(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) gpa_t gpa, u64 size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) void *data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct kvm_memslots *slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) struct kvm_memory_slot *memslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) slots = kvm_memslots(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* we only care about the pages that the guest sees */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) kvm_for_each_memslot(memslot, slots) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) unsigned long hva_start, hva_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) gfn_t gpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) hva_start = max(start, memslot->userspace_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) hva_end = min(end, memslot->userspace_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) (memslot->npages << PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (hva_start >= hva_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
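/*
 * Convert the start of the overlap back to a guest physical address
 * and pass the length of the overlap to the handler; the handlers'
 * return values are OR'ed together.
 */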
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) unsigned flags = *(unsigned *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) int kvm_unmap_hva_range(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) unsigned long start, unsigned long end, unsigned flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (!kvm->arch.mmu.pgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) trace_kvm_unmap_hva_range(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) kvm_pfn_t *pfn = (kvm_pfn_t *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) WARN_ON(size != PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * The MMU notifiers will have unmapped a huge PMD before calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * ->change_pte() (which in turn calls kvm_set_spte_hva()) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * therefore we never need to clear out a huge PMD through this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * calling path and a memcache is not required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, gpa, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) __pfn_to_phys(*pfn), KVM_PGTABLE_PROT_R, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) unsigned long end = hva + PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) kvm_pfn_t pfn = pte_pfn(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (!kvm->arch.mmu.pgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) trace_kvm_set_spte_hva(hva);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) * We've moved a page around, probably through CoW, so let's treat it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * just like a translation fault and clean the cache to the PoC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) clean_dcache_guest_page(pfn, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) pte_t pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) kvm_pte_t kpte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, gpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) pte = __pte(kpte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return pte_valid(pte) && pte_young(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, gpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (!kvm->arch.mmu.pgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) trace_kvm_age_hva(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (!kvm->arch.mmu.pgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) trace_kvm_test_age_hva(hva);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) kvm_test_age_hva_handler, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) phys_addr_t kvm_mmu_get_httbr(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return __pa(hyp_pgtable->pgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) phys_addr_t kvm_get_idmap_vector(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return hyp_idmap_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static int kvm_map_idmap_text(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) unsigned long size = hyp_idmap_end - hyp_idmap_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) PAGE_HYP_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) kvm_err("Failed to idmap %lx-%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) hyp_idmap_start, hyp_idmap_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) static void *kvm_hyp_zalloc_page(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return (void *)get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) .zalloc_page = kvm_hyp_zalloc_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) .get_page = kvm_host_get_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) .put_page = kvm_host_put_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) .phys_to_virt = kvm_host_va,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) .virt_to_phys = kvm_host_pa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) int kvm_mmu_init(u32 *hyp_va_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * We rely on the linker script to ensure at build time that the HYP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * init code does not cross a page boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
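/*
 * T0SZ encodes the number of unused top address bits at EL2, so the
 * usable hyp VA width is 64 - T0SZ bits.
 */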
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) *hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) kvm_debug("HYP VA range: %lx:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) kern_hyp_va(PAGE_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) kern_hyp_va((unsigned long)high_memory - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * The idmap page is intersecting with the hyp VA space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * it is not safe to continue further.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (!hyp_pgtable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) kvm_err("Hyp mode page-table not allocated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) goto out_free_pgtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) err = kvm_map_idmap_text();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) goto out_destroy_pgtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) io_map_base = hyp_idmap_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) out_destroy_pgtable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) kvm_pgtable_hyp_destroy(hyp_pgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) out_free_pgtable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) kfree(hyp_pgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) hyp_pgtable = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) void kvm_arch_commit_memory_region(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) const struct kvm_userspace_memory_region *mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) struct kvm_memory_slot *old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) const struct kvm_memory_slot *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) enum kvm_mr_change change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * At this point memslot has been committed and there is an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * allocated dirty_bitmap[], dirty pages will be tracked while the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * memory slot is write protected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * If dirty logging was enabled with initial-all-set, we don't need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * to write-protect any pages because they're all reported as dirty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * Huge pages and normal pages will be write-protected gradually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) kvm_mmu_wp_memory_region(kvm, mem->slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) int kvm_arch_prepare_memory_region(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct kvm_memory_slot *memslot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) const struct kvm_userspace_memory_region *mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) enum kvm_mr_change change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) hva_t hva = mem->userspace_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) hva_t reg_end = hva + mem->memory_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) bool writable = !(mem->flags & KVM_MEM_READONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) change != KVM_MR_FLAGS_ONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * Prevent userspace from creating a memory region outside of the IPA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * space addressable by the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) mmap_read_lock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * A memory region could potentially cover multiple VMAs, and any holes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * between them, so iterate over all of them to find out if we can map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * any of them right now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * +--------------------------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * +---------------+----------------+ +----------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * | : VMA 1 | VMA 2 | | VMA 3 : |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * +---------------+----------------+ +----------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * | memory region |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * +--------------------------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct vm_area_struct *vma = find_vma(current->mm, hva);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) hva_t vm_start, vm_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (!vma || vma->vm_start >= reg_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * Take the intersection of this VMA with the memory region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) vm_start = max(hva, vma->vm_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) vm_end = min(reg_end, vma->vm_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
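/*
 * A VM_PFNMAP VMA describes memory with no struct pages behind it
 * (typically device MMIO): map such ranges into stage-2 right away
 * rather than waiting for a fault.
 */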
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (vma->vm_flags & VM_PFNMAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) gpa_t gpa = mem->guest_phys_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) (vm_start - mem->userspace_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) phys_addr_t pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) pa += vm_start - vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /* IO region dirty page logging not allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) vm_end - vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) writable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) hva = vm_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) } while (hva < reg_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (change == KVM_MR_FLAGS_ONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) spin_lock(&kvm->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) stage2_flush_memslot(kvm, memslot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) spin_unlock(&kvm->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) mmap_read_unlock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) void kvm_arch_flush_shadow_all(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) kvm_free_stage2_pgd(&kvm->arch.mmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct kvm_memory_slot *slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) phys_addr_t size = slot->npages << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) spin_lock(&kvm->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) unmap_stage2_range(&kvm->arch.mmu, gpa, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) spin_unlock(&kvm->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) * Main problems:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * - S/W ops are local to a CPU (not broadcast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * - We have line migration behind our back (speculation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * - System caches don't support S/W at all (damn!)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * In the face of the above, the best we can do is to try and convert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * S/W ops to VA ops. Because the guest is not allowed to infer the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * S/W to PA mapping, it can only use S/W to nuke the whole cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * which is a rather good thing for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) * Also, it is only used when turning caches on/off ("The expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * usage of the cache maintenance instructions that operate by set/way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * is associated with the cache maintenance instructions associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * with the powerdown and powerup of caches, if this is required by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * the implementation.").
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * We use the following policy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * - If we trap a S/W operation, we enable VM trapping to detect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * caches being turned on/off, and do a full clean.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * - We flush the caches both when they are turned on and when they are turned off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * - Once the caches are enabled, we stop trapping VM ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) void kvm_set_way_flush(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) unsigned long hcr = *vcpu_hcr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * If this is the first time we do a S/W operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * (i.e. HCR_TVM not set), flush the whole of guest memory and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * enable VM trapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * Otherwise, rely on the VM trapping to wait for the MMU +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * Caches to be turned off. At that point, we'll be able to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * clean the caches again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if (!(hcr & HCR_TVM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) trace_kvm_set_way_flush(*vcpu_pc(vcpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) vcpu_has_cache_enabled(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) stage2_flush_vm(vcpu->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) *vcpu_hcr(vcpu) = hcr | HCR_TVM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) bool now_enabled = vcpu_has_cache_enabled(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * If switching the MMU+caches on, need to invalidate the caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * If switching it off, need to clean the caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * Clean + invalidate always does the trick.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (now_enabled != was_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) stage2_flush_vm(vcpu->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) /* Caches are now on, stop trapping VM ops (until a S/W op) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (now_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) *vcpu_hcr(vcpu) &= ~HCR_TVM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }