// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/hashtable.h>
#include <linux/amd-iommu.h>
#include <linux/kvm_host.h>

#include <asm/irq_remapping.h>

#include "trace.h"
#include "lapic.h"
#include "x86.h"
#include "irq.h"
#include "svm.h"

/* enable / disable AVIC */
int avic;
#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
#endif

#define SVM_AVIC_DOORBELL	0xc001011b

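/*
 * Mask for the 4KB-aligned host physical address stored in the AVIC
 * pointer fields below: it clears bits 63:52 and the low 12 offset
 * bits, keeping physical-address bits 51:12.
 */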
#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT	255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK		1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK		0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK		0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS		8
#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS			24
#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
					(y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
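/*
 * For example (illustrative values only): AVIC_GATAG(0x123456, 0x01)
 * yields 0x12345601; AVIC_GATAG_TO_VMID() then recovers 0x123456 and
 * AVIC_GATAG_TO_VCPUID() recovers 0x01.
 */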

/*
 * Note:
 * This hash table is used to map a VM_ID to a struct kvm_svm when
 * handling AMD IOMMU GALOG notifications, in order to schedule in a
 * particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = false;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/*
 * This is a wrapper of struct amd_iommu_ir_data.
 */
struct amd_svm_iommu_ir {
	struct list_head node;	/* Used by SVM for per-vcpu ir_list */
	void *data;		/* Storing pointer to struct amd_ir_data */
};

enum avic_ipi_failure_cause {
	AVIC_IPI_FAILURE_INVALID_INT_TYPE,
	AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
	AVIC_IPI_FAILURE_INVALID_TARGET,
	AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};

/*
 * Note:
 * This function is called from the IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_svm *kvm_svm;
	struct kvm_vcpu *vcpu = NULL;
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	pr_debug("%s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
	trace_kvm_avic_ga_log(vm_id, vcpu_id);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
		if (kvm_svm->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	/*
	 * Note:
	 * At this point, the IOMMU should have already set the pending
	 * bit in the vAPIC backing page. So, we just need to schedule
	 * in the vcpu.
	 */
	if (vcpu)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

void avic_vm_destroy(struct kvm *kvm)
{
	unsigned long flags;
	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

	if (!avic)
		return;

	if (kvm_svm->avic_logical_id_table_page)
		__free_page(kvm_svm->avic_logical_id_table_page);
	if (kvm_svm->avic_physical_id_table_page)
		__free_page(kvm_svm->avic_physical_id_table_page);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_del(&kvm_svm->hnode);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
}

int avic_vm_init(struct kvm *kvm)
{
	unsigned long flags;
	int err = -ENOMEM;
	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
	struct kvm_svm *k2;
	struct page *p_page;
	struct page *l_page;
	u32 vm_id;

	if (!avic)
		return 0;

	/* Allocating physical APIC ID table (4KB) */
	p_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!p_page)
		goto free_avic;

	kvm_svm->avic_physical_id_table_page = p_page;

	/* Allocating logical APIC ID table (4KB) */
	l_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!l_page)
		goto free_avic;

	kvm_svm->avic_logical_id_table_page = l_page;

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
 again:
	vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
	if (vm_id == 0) { /* id is 1-based, zero is not okay */
		next_vm_id_wrapped = true;
		goto again;
	}
	/* Is it still in use? Only possible if wrapped at least once */
	if (next_vm_id_wrapped) {
		hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
			if (k2->avic_vm_id == vm_id)
				goto again;
		}
	}
	kvm_svm->avic_vm_id = vm_id;
	hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	return 0;

free_avic:
	avic_vm_destroy(kvm);
	return err;
}

void avic_init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
	phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
	phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
	phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));

	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
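	/* The low byte of AVIC_PHYSICAL_ID holds the max table index. */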
	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
	if (kvm_apicv_activated(svm->vcpu.kvm))
		vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
	else
		vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
}

static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
				       unsigned int index)
{
	u64 *avic_physical_id_table;
	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);

	if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
		return NULL;

	avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);

	return &avic_physical_id_table[index];
}

/*
 * Note:
 * AVIC hardware walks the nested page table to check permissions,
 * but does not use the SPA address specified in the leaf page
 * table entry since it uses the address in the AVIC_BACKING_PAGE
 * pointer field of the VMCB. Therefore, we set up the
 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
 */
static int avic_update_access_page(struct kvm *kvm, bool activate)
{
	int ret = 0;

	mutex_lock(&kvm->slots_lock);
	/*
	 * During kvm_destroy_vm(), kvm_pit_set_reinject() could trigger
	 * an APICv mode change, which updates the
	 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT memory region. So, we need
	 * to ensure that kvm->mm == current->mm.
	 */
	if ((kvm->arch.apic_access_page_done == activate) ||
	    (kvm->mm != current->mm))
		goto out;

	ret = __x86_set_memory_region(kvm,
				      APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				      APIC_DEFAULT_PHYS_BASE,
				      activate ? PAGE_SIZE : 0);
	if (ret)
		goto out;

	kvm->arch.apic_access_page_done = activate;
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}

static int avic_init_backing_page(struct kvm_vcpu *vcpu)
{
	u64 *entry, new_entry;
	int id = vcpu->vcpu_id;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
		return -EINVAL;

	if (!svm->vcpu.arch.apic->regs)
		return -EINVAL;

	if (kvm_apicv_activated(vcpu->kvm)) {
		int ret;

		ret = avic_update_access_page(vcpu->kvm, true);
		if (ret)
			return ret;
	}

	svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);

	/* Setting the AVIC backing page address in the physical APIC ID table */
	entry = avic_get_physical_id_entry(vcpu, id);
	if (!entry)
		return -EINVAL;

	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
			      AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
			      AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
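	/* Publish the entry with WRITE_ONCE; hardware may read it concurrently. */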
	WRITE_ONCE(*entry, new_entry);

	svm->avic_physical_id_cache = entry;

	return 0;
}

int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
{
	u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
	u32 icrl = svm->vmcb->control.exit_info_1;
	u32 id = svm->vmcb->control.exit_info_2 >> 32;
	u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
	struct kvm_lapic *apic = svm->vcpu.arch.apic;

	trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);

	switch (id) {
	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
		/*
		 * AVIC hardware handles the generation of
		 * IPIs when the specified Message Type is Fixed
		 * (also known as fixed delivery mode) and
		 * the Trigger Mode is edge-triggered. The hardware
		 * also supports self and broadcast delivery modes
		 * specified via the Destination Shorthand (DSH)
		 * field of the ICRL. Logical and physical APIC ID
		 * formats are supported. All other IPI types cause
		 * a #VMEXIT, which needs to be emulated.
		 */
		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
		break;
	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
		int i;
		struct kvm_vcpu *vcpu;
		struct kvm *kvm = svm->vcpu.kvm;
		struct kvm_lapic *apic = svm->vcpu.arch.apic;

		/*
		 * At this point, we expect that the AVIC HW has already
		 * set the appropriate IRR bits on the valid target
		 * vcpus. So, we just need to kick the appropriate vcpu.
		 */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			bool m = kvm_apic_match_dest(vcpu, apic,
						     icrl & APIC_SHORT_MASK,
						     GET_APIC_DEST_FIELD(icrh),
						     icrl & APIC_DEST_MASK);

			if (m && !avic_vcpu_is_running(vcpu))
				kvm_vcpu_wake_up(vcpu);
		}
		break;
	}
	case AVIC_IPI_FAILURE_INVALID_TARGET:
		break;
	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
		WARN_ONCE(1, "Invalid backing page\n");
		break;
	default:
		pr_err("Unknown IPI interception\n");
	}

	return 1;
}

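/*
 * Compute a pointer into the logical APIC ID table for a given LDR.
 * In flat mode, DLID bit N maps to index N (0-7); in cluster mode the
 * index is (cluster << 2) + apic, i.e. four entries per cluster. For
 * example, DLID 0x21 (cluster 2, apic bit 0) maps to index 8.
 */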
static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
{
	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
	int index;
	u32 *logical_apic_id_table;
	int dlid = GET_APIC_LOGICAL_ID(ldr);

	if (!dlid)
		return NULL;

	if (flat) { /* flat */
		index = ffs(dlid) - 1;
		if (index > 7)
			return NULL;
	} else { /* cluster */
		int cluster = (dlid & 0xf0) >> 4;
		int apic = ffs(dlid & 0x0f) - 1;

		if ((apic < 0) || (apic > 7) ||
		    (cluster >= 0xf))
			return NULL;
		index = (cluster << 2) + apic;
	}

	logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);

	return &logical_apic_id_table[index];
}

static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
{
	bool flat;
	u32 *entry, new_entry;

	flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
	entry = avic_get_logical_id_entry(vcpu, ldr, flat);
	if (!entry)
		return -EINVAL;

	new_entry = READ_ONCE(*entry);
	new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
	new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
	new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
	WRITE_ONCE(*entry, new_entry);

	return 0;
}

static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool flat = svm->dfr_reg == APIC_DFR_FLAT;
	u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);

	if (entry)
		clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
}

static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
{
	int ret = 0;
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
	u32 id = kvm_xapic_id(vcpu->arch.apic);

	if (ldr == svm->ldr_reg)
		return 0;

	avic_invalidate_logical_id_entry(vcpu);

	if (ldr)
		ret = avic_ldr_write(vcpu, id, ldr);

	if (!ret)
		svm->ldr_reg = ldr;

	return ret;
}

static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
{
	u64 *old, *new;
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 id = kvm_xapic_id(vcpu->arch.apic);

	if (vcpu->vcpu_id == id)
		return 0;

	old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
	new = avic_get_physical_id_entry(vcpu, id);
	if (!new || !old)
		return 1;

	/* We need to move the physical_id_entry to the new offset */
	*new = *old;
	*old = 0ULL;
	to_svm(vcpu)->avic_physical_id_cache = new;

	/*
	 * Also update the guest physical APIC ID in the logical
	 * APIC ID table entry if the LDR has already been set up.
	 */
	if (svm->ldr_reg)
		avic_handle_ldr_update(vcpu);

	return 0;
}

static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);

	if (svm->dfr_reg == dfr)
		return;

	avic_invalidate_logical_id_entry(vcpu);
	svm->dfr_reg = dfr;
}

static int avic_unaccel_trap_write(struct vcpu_svm *svm)
{
	struct kvm_lapic *apic = svm->vcpu.arch.apic;
	u32 offset = svm->vmcb->control.exit_info_1 &
		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;

	switch (offset) {
	case APIC_ID:
		if (avic_handle_apic_id_update(&svm->vcpu))
			return 0;
		break;
	case APIC_LDR:
		if (avic_handle_ldr_update(&svm->vcpu))
			return 0;
		break;
	case APIC_DFR:
		avic_handle_dfr_update(&svm->vcpu);
		break;
	default:
		break;
	}

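	/*
	 * For traps, AVIC has already committed the write to the backing
	 * page; replay the value through the common local APIC emulation
	 * so any side effects take place.
	 */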
	kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));

	return 1;
}

static bool is_avic_unaccelerated_access_trap(u32 offset)
{
	bool ret = false;

	switch (offset) {
	case APIC_ID:
	case APIC_EOI:
	case APIC_RRR:
	case APIC_LDR:
	case APIC_DFR:
	case APIC_SPIV:
	case APIC_ESR:
	case APIC_ICR:
	case APIC_LVTT:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT0:
	case APIC_LVT1:
	case APIC_LVTERR:
	case APIC_TMICT:
	case APIC_TDCR:
		ret = true;
		break;
	default:
		break;
	}
	return ret;
}

int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
{
	int ret = 0;
	u32 offset = svm->vmcb->control.exit_info_1 &
		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
	u32 vector = svm->vmcb->control.exit_info_2 &
		     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
	bool write = (svm->vmcb->control.exit_info_1 >> 32) &
		     AVIC_UNACCEL_ACCESS_WRITE_MASK;
	bool trap = is_avic_unaccelerated_access_trap(offset);

	trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
					    trap, write, vector);
	if (trap) {
		/* Handling Trap */
		WARN_ONCE(!write, "svm: Handling trap read.\n");
		ret = avic_unaccel_trap_write(svm);
	} else {
		/* Handling Fault */
		ret = kvm_emulate_instruction(&svm->vcpu, 0);
	}

	return ret;
}

int avic_init_vcpu(struct vcpu_svm *svm)
{
	int ret;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (!avic || !irqchip_in_kernel(vcpu->kvm))
		return 0;

	ret = avic_init_backing_page(&svm->vcpu);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&svm->ir_list);
	spin_lock_init(&svm->ir_list_lock);
	svm->dfr_reg = APIC_DFR_FLAT;

	return ret;
}

void avic_post_state_restore(struct kvm_vcpu *vcpu)
{
	if (avic_handle_apic_id_update(vcpu) != 0)
		return;
	avic_handle_dfr_update(vcpu);
	avic_handle_ldr_update(vcpu);
}

void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate)
{
	if (!avic || !lapic_in_kernel(vcpu))
		return;

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_request_apicv_update(vcpu->kvm, activate,
				 APICV_INHIBIT_REASON_IRQWIN);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
}

void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
{
	return;
}

void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
}

void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
{
}

static int svm_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm))
		return 0;

	/*
	 * Here, we go through the per-vcpu ir_list to update all existing
	 * interrupt remapping table entries targeting this vcpu.
	 */
	spin_lock_irqsave(&svm->ir_list_lock, flags);

	if (list_empty(&svm->ir_list))
		goto out;

	list_for_each_entry(ir, &svm->ir_list, node) {
		if (activate)
			ret = amd_iommu_activate_guest_mode(ir->data);
		else
			ret = amd_iommu_deactivate_guest_mode(ir->data);
		if (ret)
			break;
	}
out:
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
	return ret;
}

void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	bool activated = kvm_vcpu_apicv_active(vcpu);

	if (!avic)
		return;

	if (activated) {
		/*
		 * During AVIC temporary deactivation, the guest could update
		 * the APIC ID, DFR and LDR registers, which would not be
		 * trapped by avic_unaccelerated_access_interception(). In this
		 * case, we need to check and update the AVIC logical APIC ID
		 * table accordingly before re-activating.
		 */
		avic_post_state_restore(vcpu);
		vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
	} else {
		vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
	}
	vmcb_mark_dirty(vmcb, VMCB_AVIC);

	svm_set_pi_irte_mode(vcpu, activated);
}

void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
	return;
}

int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
{
	if (!vcpu->arch.apicv_active)
		return -1;

	kvm_lapic_set_irr(vec, vcpu->arch.apic);
	smp_mb__after_atomic();

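	/*
	 * If the target vCPU is running on another physical CPU, ring the
	 * AVIC doorbell on that CPU so hardware delivers the pending IRR
	 * bit directly; otherwise wake the vCPU so it picks up the
	 * interrupt on its next entry.
	 */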
	if (avic_vcpu_is_running(vcpu)) {
		int cpuid = vcpu->cpu;

		if (cpuid != get_cpu())
			wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid));
		put_cpu();
	} else
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
	return false;
}

static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
	unsigned long flags;
	struct amd_svm_iommu_ir *cur;

	spin_lock_irqsave(&svm->ir_list_lock, flags);
	list_for_each_entry(cur, &svm->ir_list, node) {
		if (cur->data != pi->ir_data)
			continue;
		list_del(&cur->node);
		kfree(cur);
		break;
	}
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}

static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;

	/*
	 * In some cases, the existing irte is updated and re-set,
	 * so we need to check here if it's already been added
	 * to the ir_list.
	 */
	if (pi->ir_data && (pi->prev_ga_tag != 0)) {
		struct kvm *kvm = svm->vcpu.kvm;
		u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
		struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		struct vcpu_svm *prev_svm;

		if (!prev_vcpu) {
			ret = -EINVAL;
			goto out;
		}

		prev_svm = to_svm(prev_vcpu);
		svm_ir_list_del(prev_svm, pi);
	}

	/*
	 * Allocating a new amd_svm_iommu_ir entry, which will be added
	 * to the per-vcpu ir_list.
	 */
	ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
	if (!ir) {
		ret = -ENOMEM;
		goto out;
	}
	ir->data = pi->ir_data;

	spin_lock_irqsave(&svm->ir_list_lock, flags);
	list_add(&ir->node, &svm->ir_list);
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
out:
	return ret;
}

/*
 * Note:
 * The HW cannot support posting multicast/broadcast
 * interrupts to a vCPU. So, we still use legacy interrupt
 * remapping for these kinds of interrupts.
 *
 * For lowest-priority interrupts, we only support
 * those with a single CPU as the destination, e.g. the user
 * configures the interrupts via /proc/irq or uses
 * irqbalance to make the interrupts single-CPU.
 */
static int
get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
{
	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu = NULL;

	kvm_set_msi_irq(kvm, e, &irq);

	if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
	    !kvm_irq_is_postable(&irq)) {
		pr_debug("%s: use legacy intr remap mode for irq %u\n",
			 __func__, irq.vector);
		return -1;
	}

	pr_debug("%s: use GA mode for irq %u\n", __func__,
		 irq.vector);
	*svm = to_svm(vcpu);
	vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
	vcpu_info->vector = irq.vector;

	return 0;
}

/*
 * svm_update_pi_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: kvm
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 * returns 0 on success, < 0 on failure
 */
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_routing_table *irq_rt;
	int idx, ret = 0;

	if (!kvm_arch_has_assigned_device(kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP))
		return 0;

	pr_debug("%s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
		 __func__, host_irq, guest_irq, set);
	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);

	if (guest_irq >= irq_rt->nr_rt_entries ||
	    hlist_empty(&irq_rt->map[guest_irq])) {
		pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
			     guest_irq, irq_rt->nr_rt_entries);
		goto out;
	}

	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
		struct vcpu_data vcpu_info;
		struct vcpu_svm *svm = NULL;

		if (e->type != KVM_IRQ_ROUTING_MSI)
			continue;

/*
 * Here, we set up the IRTE in legacy (remapped) mode in the
 * following cases:
 * 1. The interrupt cannot be targeted at one specific vcpu.
 * 2. The posted interrupt is being unset (@set is false).
 * 3. APIC virtualization is disabled for the vcpu.
 * 4. The IRQ has an incompatible delivery mode (SMI, INIT, etc.).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) kvm_vcpu_apicv_active(&svm->vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct amd_iommu_pi_data pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /* Try to enable guest_mode in IRTE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) AVIC_HPA_MASK);
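/*
 * The GA tag packs the per-VM AVIC id together with the vcpu id
 * (the low AVIC_VCPU_ID_BITS bits), so the IOMMU GA log handler
 * can map a logged event back to the target vcpu.
 */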
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) svm->vcpu.vcpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) pi.is_guest_mode = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) pi.vcpu_data = &vcpu_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) ret = irq_set_vcpu_affinity(host_irq, &pi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
/*
 * Here, we have successfully set up vcpu affinity in
 * IOMMU guest mode.  Now, we need to store the posted
 * interrupt information in a per-vcpu ir_list so that
 * we can reference it directly when we update the vcpu
 * scheduling information in the IOMMU irte.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (!ret && pi.is_guest_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) svm_ir_list_add(svm, &pi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* Use legacy mode in IRTE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct amd_iommu_pi_data pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
/*
 * Here, pi is used to:
 * - Tell the IOMMU to use legacy mode for this interrupt.
 * - Retrieve the ga_tag of the prior interrupt remapping data.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) pi.prev_ga_tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pi.is_guest_mode = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ret = irq_set_vcpu_affinity(host_irq, &pi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
/*
 * Check if the posted interrupt was previously
 * set up in guest mode by checking whether a ga_tag
 * was cached.  If so, we need to clean up the per-vcpu
 * ir_list.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (!ret && pi.prev_ga_tag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) vcpu = kvm_get_vcpu_by_id(kvm, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) svm_ir_list_del(to_svm(vcpu), &pi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (!ret && svm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) e->gsi, vcpu_info.vector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) vcpu_info.pi_desc_addr, set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) pr_err("%s: failed to update PI IRTE\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) srcu_read_unlock(&kvm->irq_srcu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
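/*
 * Report whether a given APICv inhibit reason is handled by SVM; only
 * the reasons listed below can deactivate AVIC.
 */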
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) bool svm_check_apicv_inhibit_reasons(ulong bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) BIT(APICV_INHIBIT_REASON_HYPERV) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) BIT(APICV_INHIBIT_REASON_NESTED) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) BIT(APICV_INHIBIT_REASON_IRQWIN) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) BIT(APICV_INHIBIT_REASON_X2APIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return supported & BIT(bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
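/*
 * Activating or deactivating APICv updates the APIC access page
 * mapping, which AVIC relies on for guest APIC MMIO.
 */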
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) avic_update_access_page(kvm, activate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
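/*
 * Propagate the vcpu's (cpu, is-running) state to every IRTE that
 * targets it, so the IOMMU posts interrupts to the right physical
 * CPU; @r tells whether the vcpu is about to run.
 */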
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct amd_svm_iommu_ir *ir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (!kvm_arch_has_assigned_device(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
/*
 * Here, we go through the per-vcpu ir_list to update all existing
 * interrupt remapping table entries targeting this vcpu.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) spin_lock_irqsave(&svm->ir_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (list_empty(&svm->ir_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) list_for_each_entry(ir, &svm->ir_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) ret = amd_iommu_update_ga(cpu, r, ir->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) spin_unlock_irqrestore(&svm->ir_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) u64 entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /* ID = 0xff (broadcast), ID > 0xff (reserved) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) int h_physical_id = kvm_cpu_get_apicid(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (!kvm_vcpu_apicv_active(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
/*
 * Since the host physical APIC ID is 8 bits,
 * we can support host APIC IDs up to 255.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
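/*
 * The physical APIC ID table entry is read and updated with
 * READ_ONCE()/WRITE_ONCE() because the AVIC hardware and the IOMMU
 * may consume it concurrently.
 */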
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) entry = READ_ONCE(*(svm->avic_physical_id_cache));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (svm->avic_is_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) svm->avic_is_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) void avic_vcpu_put(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) u64 entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (!kvm_vcpu_apicv_active(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) entry = READ_ONCE(*(svm->avic_physical_id_cache));
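/*
 * If the vcpu was marked running, tell the IOMMU it no longer is:
 * cpu == -1 with is_run == false updates the IRTEs so that a posted
 * interrupt is reported through the GA log and can wake the vcpu,
 * instead of ringing a doorbell it would not observe.
 */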
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
/*
 * This function is called during VCPU halt/unhalt: clearing the running
 * state while the vcpu is halted lets a posted interrupt wake it through
 * the IOMMU GA log rather than a doorbell it cannot observe.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) svm->avic_is_running = is_run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (is_run)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) avic_vcpu_load(vcpu, vcpu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) avic_vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) avic_set_running(vcpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
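/*
 * Process any pending APICv activation change before marking the
 * vcpu running again, so avic_vcpu_load() sees up-to-date state.
 */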
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) kvm_vcpu_update_apicv(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) avic_set_running(vcpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }