// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
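
/*
 * On 32-bit builds a plain u64 '%' would expand to the libgcc helper
 * __umoddi3, which the kernel does not provide, so the remainder is
 * derived from div64_u64() instead.  Both forms agree, e.g.
 * mod_64(10, 3) == 1 on either build.
 */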

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* The following defines are not in apicdef.h */
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

static bool lapic_timer_advance_dynamic __read_mostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT	1000
#define LAPIC_TIMER_ADVANCE_NS_MAX	5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP	8

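/*
 * IRR, ISR and TMR are 256-bit registers laid out as eight 32-bit
 * words spaced 0x10 bytes apart in the APIC page.  VEC_POS() and
 * REG_POS() (lapic.h) split a vector accordingly: vector 0x31 is bit
 * 17 (0x31 & 31) of the word at offset 0x10 ((0x31 >> 5) << 4).
 */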
static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
	return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops.set_hv_timer
	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
		    kvm_can_post_timer_interrupt(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_can_use_hv_timer);

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

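/*
 * Translate a logical destination ID into a slice of the lookup map:
 * *cluster points at the array of LAPICs for the addressed cluster and
 * *mask holds one bit per addressed member.  x2APIC encodes the cluster
 * in the upper 16 bits of the ID and a 16-wide member bitmask in the
 * lower bits, with clusters aliasing groups of 16 consecutive entries
 * of the physical map; xAPIC flat mode uses a single 8-bit mask, xAPIC
 * cluster mode a 4-bit mask within one of 16 clusters.  Returns false
 * when the map mode cannot serve the lookup.
 */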
static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask)
{
	switch (map->mode) {
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			offset = array_index_nospec(offset, map->max_apic_id + 1);
			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}

		return true;
		}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
		*mask = dest_id & 0xf;
		return true;
	default:
		/* Not optimized. */
		return false;
	}
}

static void kvm_apic_map_free(struct rcu_head *rcu)
{
	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

	kvfree(map);
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * apic_map_lock_held.
 */
enum {
	CLEAN,
	UPDATE_IN_PROGRESS,
	DIRTY
};

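/*
 * Rebuild the optimized interrupt-routing tables (physical and logical
 * maps) from the current state of every vCPU's APIC.  The dirty-state
 * handshake above lets concurrent callers return early once one
 * updater has claimed the work.
 */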
void kvm_recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	int i;
	u32 max_id = 255; /* enough space for any xAPIC ID */

	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
		return;

	mutex_lock(&kvm->arch.apic_map_lock);
	/*
	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
	 * (if clean) or the APIC registers (if dirty).
	 */
	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
		/* Someone else has updated the map. */
		mutex_unlock(&kvm->arch.apic_map_lock);
		return;
	}

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_apic_present(vcpu))
			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

	new = kvzalloc(sizeof(struct kvm_apic_map) +
	               sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
		       GFP_KERNEL_ACCOUNT);

	if (!new)
		goto out;

	new->max_apic_id = max_id;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		struct kvm_lapic **cluster;
		u16 mask;
		u32 ldr;
		u8 xapic_id;
		u32 x2apic_id;

		if (!kvm_apic_present(vcpu))
			continue;

		xapic_id = kvm_xapic_id(apic);
		x2apic_id = kvm_x2apic_id(apic);

		/* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
		if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
				x2apic_id <= new->max_apic_id)
			new->phys_map[x2apic_id] = apic;
		/*
		 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
		 * prevent them from masking VCPUs with APIC ID <= 0xff.
		 */
		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
			new->phys_map[xapic_id] = apic;

		if (!kvm_apic_sw_enabled(apic))
			continue;

		ldr = kvm_lapic_get_reg(apic, APIC_LDR);

		if (apic_x2apic_mode(apic)) {
			new->mode |= KVM_APIC_MODE_X2APIC;
		} else if (ldr) {
			ldr = GET_APIC_LOGICAL_ID(ldr);
			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
			else
				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
		}

		if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
			continue;

		if (mask)
			cluster[ffs(mask) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	/*
	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
	 * If another update has come in, leave it DIRTY.
	 */
	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
			       UPDATE_IN_PROGRESS, CLEAN);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		call_rcu(&old->rcu, kvm_apic_map_free);

	kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled)
			static_key_slow_dec_deferred(&apic_sw_disabled);
		else
			static_key_slow_inc(&apic_sw_disabled.key);

		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	}
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
	kvm_lapic_set_reg(apic, APIC_DFR, val);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

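/*
 * The x2APIC logical ID is derived from the physical one: the upper 16
 * bits select the cluster (id / 16), the lower 16 bits carry a one-hot
 * position within it.  E.g. id 0x23 (cluster 2, position 3) yields
 * LDR 0x00020008.
 */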
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
	return ((id >> 4) << 16) | (1 << (id & 0xf));
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
	u32 ldr = kvm_apic_calc_x2apic_ldr(id);

	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

	kvm_lapic_set_reg(apic, APIC_ID, id);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 v = APIC_VERSION;

	if (!lapic_in_kernel(vcpu))
		return;

	/*
	 * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
	 * implementation), which doesn't have an EOI register.  Some buggy
	 * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast
	 * in the LAPIC without checking the IOAPIC version first, so
	 * level-triggered interrupts would never get EOIed in the IOAPIC.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
	    !ioapic_in_kernel(vcpu->kvm))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
	LVT_MASK,	/* part LVTT mask, timer mode mask added at runtime */
	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
	LINT_MASK, LINT_MASK,	/* LVT0-1 */
	LVT_MASK		/* LVTERR */
};

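/*
 * Scan a 256-bit vector bitmap from the highest 32-bit word downwards
 * and return the highest vector set, or -1 if the bitmap is empty.
 */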
static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return __fls(*reg) + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}

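/*
 * Transfer pending vectors from the posted-interrupt request bitmap
 * into the vIRR, 32 vectors per iteration.  Returns true iff the scan
 * set new bits and the highest bit it set is also the highest vector
 * pending overall, i.e. the update may have unblocked a new
 * highest-priority interrupt.
 */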
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
	u32 i, vec;
	u32 pir_val, irr_val, prev_irr_val;
	int max_updated_irr;

	max_updated_irr = -1;
	*max_irr = -1;

	for (i = vec = 0; i <= 7; i++, vec += 32) {
		pir_val = READ_ONCE(pir[i]);
		irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
		if (pir_val) {
			prev_irr_val = irr_val;
			irr_val |= xchg(&pir[i], 0);
			*((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
			if (prev_irr_val != irr_val) {
				max_updated_irr =
					__fls(irr_val ^ prev_irr_val) + vec;
			}
		}
		if (irr_val)
			*max_irr = __fls(irr_val) + vec;
	}

	return ((max_updated_irr != -1) &&
		(max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __kvm_apic_update_irr(pir, apic->regs, max_irr);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will always be true
	 * with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	vcpu = apic->vcpu;

	if (unlikely(vcpu->arch.apicv_active)) {
		/* need to update RVI */
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		kvm_x86_ops.hwapic_irr_update(vcpu,
				apic_find_highest_irr(apic));
	} else {
		apic->irr_pending = false;
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
	apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood. Instead
	 * just set SVI.
	 */
	if (unlikely(vcpu->arch.apicv_active)) {
		kvm_x86_ops.hwapic_isr_update(vcpu, vec);
	} else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * ISR (in service register) bit is set when injecting an interrupt.
		 * The highest vector is injected. Thus the latest bit set matches
		 * the highest bit in ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * We do get here with APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment. In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(vcpu->arch.apicv_active)) {
		kvm_x86_ops.hwapic_isr_update(vcpu,
					      apic_find_highest_isr(apic));
	} else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/* This may race with setting of irr in __apic_accept_irq() and
	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
	 * will cause vmexit immediately and the value will be recalculated
	 * on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
			 struct kvm_lapic_irq *irq, u32 min)
{
	int i, count = 0;
	struct kvm_vcpu *vcpu;

	if (min > map->max_apic_id)
		return 0;

	for_each_set_bit(i, ipi_bitmap,
		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
		if (map->phys_map[min + i]) {
			vcpu = map->phys_map[min + i]->vcpu;
			count += kvm_apic_set_irq(vcpu, irq, NULL);
		}
	}

	return count;
}

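/*
 * PV IPI hypercall: the two bitmaps select destination APIC IDs
 * relative to @min, ipi_bitmap_low covering IDs starting at @min and
 * ipi_bitmap_high the next cluster_size IDs (64 for a 64-bit
 * hypercall, 32 for a 32-bit one, matching the guest's word size).
 * Returns the number of interrupts delivered, or a negative KVM errno.
 */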
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit)
{
	struct kvm_apic_map *map;
	struct kvm_lapic_irq irq = {0};
	int cluster_size = op_64_bit ? 64 : 32;
	int count;

	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
		return -KVM_EINVAL;

	irq.vector = icr & APIC_VECTOR_MASK;
	irq.delivery_mode = icr & APIC_MODE_MASK;
	irq.level = (icr & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr & APIC_INT_LEVELTRIG;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	count = -EOPNOTSUPP;
	if (likely(map)) {
		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
		min += cluster_size;
		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
	}

	rcu_read_unlock();
	return count;
}

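/*
 * Paravirtualized EOI: the guest registers a byte of its memory via
 * the MSR_KVM_PV_EOI_EN MSR, and the helpers below access the cached
 * mapping of that byte.  When the host sets KVM_PV_EOI_ENABLED in it,
 * the guest may complete the EOI by clearing the bit instead of
 * writing the APIC EOI register, saving an exit.
 */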
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
	u8 val;

	if (pv_eoi_get_user(vcpu, &val) < 0) {
		printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return false;
	}
	return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
		printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
		printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

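/*
 * An interrupt is deliverable only when its priority class (vector
 * bits 7:4) is strictly greater than the processor priority: e.g. with
 * PPR at 0x60, vector 0x61 (class 6) stays blocked while vector 0x71
 * (class 7) is deliverable.
 */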
static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
	int highest_irr;

	if (apic->vcpu->arch.apicv_active)
		highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu);
	else
		highest_irr = apic_find_highest_irr(apic);
	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
		return -1;
	return highest_irr;
}

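/*
 * Recompute the processor priority per the SDM rules: PPR equals the
 * TPR when the TPR's class is at least that of the highest in-service
 * vector, otherwise the in-service class with the low nibble cleared.
 * E.g. TPR 0x30 with vector 0x61 in service yields PPR 0x60.  Returns
 * true when the PPR dropped, since only a lowered PPR can newly
 * unmask a pending interrupt.
 */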
static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	*new_ppr = ppr;
	if (old_ppr != ppr)
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

	return ppr < old_ppr;
}

static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 ppr;

	if (__apic_update_ppr(apic, &ppr) &&
	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
	apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	return mda == (apic_x2apic_mode(apic) ?
			X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	if (apic_x2apic_mode(apic))
		return mda == kvm_x2apic_id(apic);

	/*
	 * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
	 * it were in x2APIC mode. Hotplugged VCPUs start in xAPIC mode and
	 * this allows unique addressing of VCPUs with APIC ID over 0xff.
	 * The 0xff condition is needed because the xAPIC ID is writeable.
	 */
	if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
		return true;

	return mda == kvm_xapic_id(apic);
}

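/*
 * Logical-mode matching examples: xAPIC flat mode only needs bit
 * overlap, so LDR 0x02 matches MDA 0x03; xAPIC cluster mode also
 * compares the upper nibbles, so LDR 0x12 matches MDA 0x13 but not
 * MDA 0x23.  x2APIC applies the cluster scheme with 16-bit masks and
 * 16-bit cluster IDs.
 */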
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) u32 logical_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (kvm_apic_broadcast(apic, mda))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (apic_x2apic_mode(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return ((logical_id >> 16) == (mda >> 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) && (logical_id & mda & 0xffff) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) logical_id = GET_APIC_LOGICAL_ID(logical_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) case APIC_DFR_FLAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return (logical_id & mda) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) case APIC_DFR_CLUSTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return ((logical_id >> 4) == (mda >> 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) && (logical_id & mda & 0xf) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /* The KVM local APIC implementation has two quirks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * KVM doesn't do that aliasing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) *
 * - in-kernel IOAPIC messages have to be delivered directly to
 *   x2APIC, because the kernel does not support interrupt remapping.
 *   In order to support broadcast without interrupt remapping, KVM
 *   rewrites the destination of non-IPI messages from APIC_BROADCAST
 *   to X2APIC_BROADCAST for targets in x2APIC mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * important when userspace wants to use x2APIC-format MSIs, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct kvm_lapic *source, struct kvm_lapic *target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) bool ipi = source != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return X2APIC_BROADCAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return dest_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
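
/*
 * Example of the quirk above (illustrative): an IOAPIC-routed message with
 * dest_id APIC_BROADCAST (0xff) aimed at a vCPU in x2APIC mode is rewritten
 * to X2APIC_BROADCAST, so the broadcast still reaches that vCPU even though
 * 0xff is not a broadcast destination in x2APIC format.
 */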
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) int shorthand, unsigned int dest, int dest_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct kvm_lapic *target = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) u32 mda = kvm_apic_mda(vcpu, dest, source, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ASSERT(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) switch (shorthand) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) case APIC_DEST_NOSHORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (dest_mode == APIC_DEST_PHYSICAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return kvm_apic_match_physical_addr(target, mda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return kvm_apic_match_logical_addr(target, mda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) case APIC_DEST_SELF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return target == source;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) case APIC_DEST_ALLINC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) case APIC_DEST_ALLBUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return target != source;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) const unsigned long *bitmap, u32 bitmap_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) u32 mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) int i, idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) mod = vector % dest_vcpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) for (i = 0; i <= mod; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) idx = find_next_bit(bitmap, bitmap_size, idx + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) BUG_ON(idx == bitmap_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
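
/*
 * Worked example (illustrative): for vector 34 and 3 destination vCPUs,
 * mod = 34 % 3 = 1, so the loop above returns the index of the second set
 * bit in the bitmap.
 */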
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (!kvm->arch.disabled_lapic_found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) kvm->arch.disabled_lapic_found = true;
		printk(KERN_INFO "Disabled LAPIC found during irq injection\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (kvm->arch.x2apic_broadcast_quirk_disabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if ((irq->dest_id == APIC_BROADCAST &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) map->mode != KVM_APIC_MODE_X2APIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (irq->dest_id == X2APIC_BROADCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (irq->dest_id == (x2apic_ipi ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) X2APIC_BROADCAST : APIC_BROADCAST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
/*
 * Return true if the interrupt can be handled by using *bitmap as an index
 * mask for valid destinations in the *dst array.
 * Return false if kvm_apic_map_get_dest_lapic() did nothing useful.
 * Note: there may be zero kvm_lapic destinations when true is returned,
 * which means that the interrupt should be dropped.  In this case, *bitmap
 * will be zero and *dst undefined.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct kvm_lapic **src, struct kvm_lapic_irq *irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct kvm_apic_map *map, struct kvm_lapic ***dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) unsigned long *bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) int i, lowest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (irq->shorthand == APIC_DEST_SELF && src) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) *dst = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) *bitmap = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) } else if (irq->shorthand)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (irq->dest_mode == APIC_DEST_PHYSICAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (irq->dest_id > map->max_apic_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) *bitmap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) *dst = &map->phys_map[dest_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) *bitmap = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) *bitmap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) (u16 *)bitmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (!kvm_lowest_prio_delivery(irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (!kvm_vector_hashing_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) lowest = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) for_each_set_bit(i, bitmap, 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (!(*dst)[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (lowest < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) lowest = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) (*dst)[lowest]->vcpu) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) lowest = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (!*bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) bitmap, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (!(*dst)[lowest]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) kvm_apic_disabled_lapic_found(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) *bitmap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) *bitmap = (lowest >= 0) ? 1 << lowest : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
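
/*
 * Worked example of the vector-hashing branch (illustrative): with vector
 * 39 and four candidate vCPUs in *bitmap, kvm_vector_to_index() picks
 * 39 % 4 = 3, i.e. the fourth set bit, and *bitmap collapses to just that
 * destination.
 */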
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct kvm_apic_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) unsigned long bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct kvm_lapic **dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) *r = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (irq->shorthand == APIC_DEST_SELF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) map = rcu_dereference(kvm->arch.apic_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) *r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) for_each_set_bit(i, &bitmap, 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (!dst[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
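
/*
 * Note (editorial): when this fast path returns false, e.g. because the
 * APIC map is unavailable or the destination is a broadcast, the caller is
 * expected to fall back to a full scan over all vCPUs with
 * kvm_apic_match_dest().
 */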
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /*
 * This routine tries to handle interrupts in posted mode. It handles the
 * different cases as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * - For single-destination interrupts, handle it in posted mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * - Else if vector hashing is enabled and it is a lowest-priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * interrupt, handle it in posted mode and use the following mechanism
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * to find the destination vCPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * 1. For lowest-priority interrupts, store all the possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * destination vCPUs in an array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * 2. Use "guest vector % max number of destination vCPUs" to find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * the right destination vCPU in the array for the lowest-priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * - Otherwise, use remapped mode to inject the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct kvm_vcpu **dest_vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct kvm_apic_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) unsigned long bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct kvm_lapic **dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (irq->shorthand)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) map = rcu_dereference(kvm->arch.apic_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) hweight16(bitmap) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) unsigned long i = find_first_bit(&bitmap, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (dst[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) *dest_vcpu = dst[i]->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /*
 * Add a pending IRQ into the LAPIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * Return 1 if successfully added and 0 if discarded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) int vector, int level, int trig_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct dest_map *dest_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) struct kvm_vcpu *vcpu = apic->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) trig_mode, vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) switch (delivery_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) case APIC_DM_LOWEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) vcpu->arch.apic_arb_prio++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) case APIC_DM_FIXED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (unlikely(trig_mode && !level))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /* FIXME add logic for vcpu on reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (unlikely(!apic_enabled(apic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (dest_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) __set_bit(vcpu->vcpu_id, dest_map->map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) dest_map->vectors[vcpu->vcpu_id] = vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (trig_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) kvm_lapic_set_vector(vector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) apic->regs + APIC_TMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) kvm_lapic_clear_vector(vector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) apic->regs + APIC_TMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) kvm_lapic_set_irr(vector, apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) kvm_make_request(KVM_REQ_EVENT, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) kvm_vcpu_kick(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) case APIC_DM_REMRD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) vcpu->arch.pv.pv_unhalted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) kvm_make_request(KVM_REQ_EVENT, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) kvm_vcpu_kick(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) case APIC_DM_SMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) kvm_make_request(KVM_REQ_SMI, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) kvm_vcpu_kick(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) case APIC_DM_NMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) kvm_inject_nmi(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) kvm_vcpu_kick(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) case APIC_DM_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (!trig_mode || level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /* assumes that there are only KVM_APIC_INIT/SIPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) apic->pending_events = (1UL << KVM_APIC_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) kvm_make_request(KVM_REQ_EVENT, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) kvm_vcpu_kick(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) case APIC_DM_STARTUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) apic->sipi_vector = vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /* make sure sipi_vector is visible for the receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) set_bit(KVM_APIC_SIPI, &apic->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) kvm_make_request(KVM_REQ_EVENT, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) kvm_vcpu_kick(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) case APIC_DM_EXTINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /*
		 * Should only be reached via kvm_apic_local_deliver() for LVT0
		 * before the NMI watchdog is enabled; actual ExtINT delivery
		 * is already handled by kvm_apic_accept_pic_intr().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) delivery_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) /*
 * This routine identifies the mask of destination vcpus meant to receive
 * an IOAPIC interrupt. It either uses kvm_apic_map_get_dest_lapic() to
 * find the destination vcpu array and set the bitmap, or it traverses
 * each available vcpu to identify the same.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) unsigned long *vcpu_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) struct kvm_lapic **dest_vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) struct kvm_lapic *src = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) struct kvm_apic_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) unsigned long bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int i, vcpu_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) map = rcu_dereference(kvm->arch.apic_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) &bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) for_each_set_bit(i, &bitmap, 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (!dest_vcpu[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) __set_bit(vcpu_idx, vcpu_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (!kvm_apic_present(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (!kvm_apic_match_dest(vcpu, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) irq->shorthand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) irq->dest_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) irq->dest_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) __set_bit(i, vcpu_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) int trigger_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
	/* Send an EOI to the IOAPIC only if it is tracking this vector. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (!kvm_ioapic_handles_vector(apic, vector))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /* Request a KVM exit to inform the userspace IOAPIC. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (irqchip_split(apic->vcpu->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) apic->vcpu->arch.pending_ioapic_eoi = vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (apic_test_vector(vector, apic->regs + APIC_TMR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) trigger_mode = IOAPIC_LEVEL_TRIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) trigger_mode = IOAPIC_EDGE_TRIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static int apic_set_eoi(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) int vector = apic_find_highest_isr(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) trace_kvm_eoi(apic, vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) /*
	 * Not every write to EOI has a corresponding ISR vector; one example
	 * is when the kernel checks the timer during setup_IO_APIC().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (vector == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) apic_clear_isr(vector, apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) apic_update_ppr(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) kvm_hv_synic_send_eoi(apic->vcpu, vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) kvm_ioapic_send_eoi(apic, vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /*
 * This interface assumes a trap-like exit, which has already finished the
 * desired side effects, including the vISR and vPPR updates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) trace_kvm_eoi(apic, vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) kvm_ioapic_send_eoi(apic, vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct kvm_lapic_irq irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) irq.vector = icr_low & APIC_VECTOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) irq.delivery_mode = icr_low & APIC_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) irq.dest_mode = icr_low & APIC_DEST_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) irq.level = (icr_low & APIC_INT_ASSERT) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) irq.shorthand = icr_low & APIC_SHORT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) irq.msi_redir_hint = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (apic_x2apic_mode(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) irq.dest_id = icr_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) trace_kvm_apic_ipi(icr_low, irq.dest_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
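
/*
 * Illustrative decoding (not from the original code): icr_low 0x40f0 is a
 * fixed-mode, physical-destination IPI for vector 0xf0 with the assert bit
 * set and no shorthand.  In xAPIC mode the destination sits in bits 31:24
 * of icr_high (0x01000000 targets APIC ID 1); in x2APIC mode icr_high is
 * the full 32-bit destination ID.
 */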
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static u32 apic_get_tmcct(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) ktime_t remaining, now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) s64 ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) u32 tmcct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) ASSERT(apic != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /* if initial count is 0, current count should also be 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) apic->lapic_timer.period == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) now = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (ktime_to_ns(remaining) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) remaining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) tmcct = div64_u64(ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) (APIC_BUS_CYCLE_NS * apic->divide_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) return tmcct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
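
/*
 * Worked example (assumes APIC_BUS_CYCLE_NS is 1, as defined in lapic.h):
 * with 250000 ns remaining, a divide_count of 1 and a period larger than
 * the remaining time, tmcct = 250000 / (1 * 1) = 250000 bus cycles.
 */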
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static void __report_tpr_access(struct kvm_lapic *apic, bool write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) struct kvm_vcpu *vcpu = apic->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) run->tpr_access.rip = kvm_rip_read(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) run->tpr_access.is_write = write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (apic->vcpu->arch.tpr_access_reporting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) __report_tpr_access(apic, write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (offset >= LAPIC_MMIO_LENGTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) switch (offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) case APIC_ARBPRI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) case APIC_TMCCT: /* Timer CCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (apic_lvtt_tscdeadline(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) val = apic_get_tmcct(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) case APIC_PROCPRI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) apic_update_ppr(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) val = kvm_lapic_get_reg(apic, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) case APIC_TASKPRI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) report_tpr_access(apic, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) val = kvm_lapic_get_reg(apic, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) return container_of(dev, struct kvm_lapic, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) #define APIC_REG_MASK(reg) (1ull << ((reg) >> 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) #define APIC_REGS_MASK(first, count) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) (APIC_REG_MASK(first) * ((1ull << (count)) - 1))
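
/*
 * Example (illustrative): APIC registers live at 16-byte offsets, so
 * APIC_REG_MASK(APIC_ID) with APIC_ID == 0x20 sets bit 2, and
 * APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) covers the eight 32-bit IRR words
 * at offsets 0x200-0x270.
 */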
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) unsigned char alignment = offset & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) u32 result;
	/* this bitmask has a bit set for each valid (non-reserved) register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) u64 valid_reg_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) APIC_REG_MASK(APIC_ID) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) APIC_REG_MASK(APIC_LVR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) APIC_REG_MASK(APIC_TASKPRI) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) APIC_REG_MASK(APIC_PROCPRI) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) APIC_REG_MASK(APIC_LDR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) APIC_REG_MASK(APIC_DFR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) APIC_REG_MASK(APIC_SPIV) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) APIC_REG_MASK(APIC_ESR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) APIC_REG_MASK(APIC_ICR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) APIC_REG_MASK(APIC_ICR2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) APIC_REG_MASK(APIC_LVTT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) APIC_REG_MASK(APIC_LVTTHMR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) APIC_REG_MASK(APIC_LVTPC) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) APIC_REG_MASK(APIC_LVT0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) APIC_REG_MASK(APIC_LVT1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) APIC_REG_MASK(APIC_LVTERR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) APIC_REG_MASK(APIC_TMICT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) APIC_REG_MASK(APIC_TMCCT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) APIC_REG_MASK(APIC_TDCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /* ARBPRI is not valid on x2APIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (!apic_x2apic_mode(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (alignment + len > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) result = __apic_read(apic, offset & ~0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) trace_kvm_apic_read(offset, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) switch (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) memcpy(data, (char *)&result + alignment, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) default:
		printk(KERN_ERR "Local APIC read with len = %x, should be 1, 2, or 4 instead\n",
		       len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return addr >= apic->base_address &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) addr < apic->base_address + LAPIC_MMIO_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) gpa_t address, int len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) struct kvm_lapic *apic = to_lapic(this);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) u32 offset = address - apic->base_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (!apic_mmio_in_range(apic, address))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (!kvm_check_has_quirk(vcpu->kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) memset(data, 0xff, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) kvm_lapic_reg_read(apic, offset, len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
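
/*
 * Note (editorial): with the KVM_X86_QUIRK_LAPIC_MMIO_HOLE quirk enabled
 * (the default), reads while the LAPIC is hardware-disabled or in x2APIC
 * mode return all-ones above; with the quirk disabled they are punted to
 * userspace as unhandled MMIO.
 */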
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) static void update_divide_count(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) u32 tmp1, tmp2, tdcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) tmp1 = tdcr & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) apic->divide_count = 0x1 << (tmp2 & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
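
/*
 * Worked example of the TDCR encoding (per the SDM layout): 0b0000 yields
 * divide-by-2, 0b1010 divide-by-128 and 0b1011 divide-by-1; bit 2 of the
 * TDCR is ignored.
 */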
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * Do not allow the guest to program periodic timers with small
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * interval, since the hrtimers are not throttled by the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * scheduler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) s64 min_period = min_timer_period_us * 1000LL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (apic->lapic_timer.period < min_period) {
			pr_info_ratelimited(
				"kvm: vcpu %i: requested %lld ns lapic timer period limited to %lld ns\n",
				apic->vcpu->vcpu_id, apic->lapic_timer.period,
				min_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) apic->lapic_timer.period = min_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
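
/*
 * Example (editorial; assumes the default min_timer_period_us of 200): a
 * guest programming a 50 us periodic timer has its period clamped to
 * 200000 ns by the check above.
 */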
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) static void cancel_hv_timer(struct kvm_lapic *apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) static void apic_update_lvtt(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) apic->lapic_timer.timer_mode_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (apic->lapic_timer.timer_mode != timer_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) APIC_LVT_TIMER_TSCDEADLINE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) hrtimer_cancel(&apic->lapic_timer.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (apic->lapic_timer.hv_timer_in_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) cancel_hv_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) kvm_lapic_set_reg(apic, APIC_TMICT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) apic->lapic_timer.period = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) apic->lapic_timer.tscdeadline = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) apic->lapic_timer.timer_mode = timer_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) limit_periodic_timer_frequency(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) /*
 * On APICv, this test (which must check the IRR rather than the ISR)
 * can cause a busy wait while a higher-priority task is running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (kvm_apic_hw_enabled(apic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int vec = reg & APIC_VECTOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) void *bitmap = apic->regs + APIC_ISR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (vcpu->arch.apicv_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) bitmap = apic->regs + APIC_IRR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (apic_test_vector(vec, bitmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * If the guest TSC is running at a different ratio than the host, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) * convert the delay to nanoseconds to achieve an accurate delay. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * that __delay() uses delay_tsc whenever the hardware has TSC, thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * always for VMX enabled hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) __delay(min(guest_cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) nsec_to_cycles(vcpu, timer_advance_ns)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) u64 delay_ns = guest_cycles * 1000000ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) ndelay(min_t(u32, delay_ns, timer_advance_ns));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
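
/*
 * Worked example of the else-branch conversion (illustrative): at
 * virtual_tsc_khz == 2000000 (a 2 GHz guest TSC), 4000 guest cycles
 * convert to 4000 * 1000000 / 2000000 = 2000 ns, further capped at
 * timer_advance_ns.
 */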
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) s64 advance_expire_delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) u64 ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) /* Do not adjust for tiny fluctuations or large random spikes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /* too early */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (advance_expire_delta < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) ns = -advance_expire_delta * 1000000ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) do_div(ns, vcpu->arch.virtual_tsc_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) /* too late */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) ns = advance_expire_delta * 1000000ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) do_div(ns, vcpu->arch.virtual_tsc_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) apic->lapic_timer.timer_advance_ns = timer_advance_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
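
/*
 * Example (illustrative): if the interrupt arrived 2000 TSC cycles late on
 * a 2 GHz guest, ns = 1000 and timer_advance_ns grows by
 * ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP, damping oscillation around the
 * true expiry.
 */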
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) u64 guest_tsc, tsc_deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) tsc_deadline = apic->lapic_timer.expired_tscdeadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) apic->lapic_timer.expired_tscdeadline = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (guest_tsc < tsc_deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (lapic_timer_advance_dynamic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (lapic_in_kernel(vcpu) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) vcpu->arch.apic->lapic_timer.timer_advance_ns &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) lapic_timer_int_injected(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) __kvm_wait_lapic_expire(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) struct kvm_timer *ktimer = &apic->lapic_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) kvm_apic_local_deliver(apic, APIC_LVTT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (apic_lvtt_tscdeadline(apic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) ktimer->tscdeadline = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) } else if (apic_lvtt_oneshot(apic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) ktimer->tscdeadline = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) ktimer->target_expiration = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) struct kvm_vcpu *vcpu = apic->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) struct kvm_timer *ktimer = &apic->lapic_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (atomic_read(&apic->lapic_timer.pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) ktimer->expired_tscdeadline = ktimer->tscdeadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (!from_timer_fn && vcpu->arch.apicv_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) WARN_ON(kvm_get_running_vcpu() != vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) kvm_apic_inject_pending_timer_irqs(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * Ensure the guest's timer has truly expired before posting an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * interrupt. Open code the relevant checks to avoid querying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * lapic_timer_int_injected(), which will be false since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) * interrupt isn't yet injected. Waiting until after injecting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) * is not an option since that won't help a posted interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) vcpu->arch.apic->lapic_timer.timer_advance_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) __kvm_wait_lapic_expire(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) kvm_apic_inject_pending_timer_irqs(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
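	/*
	 * Slow path: mark the timer as pending and raise
	 * KVM_REQ_PENDING_TIMER so the interrupt is injected on the next
	 * vcpu entry; if called from the hrtimer callback, kick the vcpu
	 * so it notices the request.
	 */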
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) atomic_inc(&apic->lapic_timer.pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (from_timer_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) kvm_vcpu_kick(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) static void start_sw_tscdeadline(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct kvm_timer *ktimer = &apic->lapic_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) u64 ns = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) ktime_t expire;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct kvm_vcpu *vcpu = apic->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) ktime_t now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (unlikely(!tscdeadline || !this_tsc_khz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) now = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) ns = (tscdeadline - guest_tsc) * 1000000ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) do_div(ns, this_tsc_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
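	/*
	 * Only arm the hrtimer if the deadline is far enough out to absorb
	 * the early-expiration advance; otherwise treat the timer as
	 * having already expired.
	 */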
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) if (likely(tscdeadline > guest_tsc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) likely(ns > apic->lapic_timer.timer_advance_ns)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) expire = ktime_add_ns(now, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
	} else {
		apic_timer_expired(apic, false);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
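/*
 * Convert an initial-count register value to nanoseconds: each timer tick
 * spans one APIC bus cycle scaled by the configured divider, i.e.
 * ns = count * APIC_BUS_CYCLE_NS * divide_count.
 */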
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) ktime_t now, remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) u64 ns_remaining_old, ns_remaining_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) apic->lapic_timer.period =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) limit_periodic_timer_frequency(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) now = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if (ktime_to_ns(remaining) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) remaining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
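	/*
	 * Rescale the remaining time by the ratio of the new and old
	 * divisors, e.g. halving the divisor halves the time left until
	 * expiration.
	 */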
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) ns_remaining_old = ktime_to_ns(remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) apic->divide_count, old_divisor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) apic->lapic_timer.tscdeadline +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) nsec_to_cycles(apic->vcpu, ns_remaining_new) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) nsec_to_cycles(apic->vcpu, ns_remaining_old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) ktime_t now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) u64 tscl = rdtsc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) s64 deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) now = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) apic->lapic_timer.period =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (!apic->lapic_timer.period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) apic->lapic_timer.tscdeadline = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) limit_periodic_timer_frequency(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) deadline = apic->lapic_timer.period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (unlikely(count_reg != APIC_TMICT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) deadline = tmict_to_ns(apic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) kvm_lapic_get_reg(apic, count_reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (unlikely(deadline <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) deadline = apic->lapic_timer.period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) else if (unlikely(deadline > apic->lapic_timer.period)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) pr_info_ratelimited(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) "kvm: vcpu %i: requested lapic timer restore with "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) "Using initial count to start timer.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) apic->vcpu->vcpu_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) count_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) kvm_lapic_get_reg(apic, count_reg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) deadline, apic->lapic_timer.period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) kvm_lapic_set_reg(apic, count_reg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) deadline = apic->lapic_timer.period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) nsec_to_cycles(apic->vcpu, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) static void advance_periodic_target_expiration(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) ktime_t now = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) u64 tscl = rdtsc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) ktime_t delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * Synchronize both deadlines to the same time source or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * differences in the periods (caused by differences in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) * underlying clocks or numerical approximation errors) will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * cause the two to drift apart over time as the errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * accumulate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) apic->lapic_timer.target_expiration =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) ktime_add_ns(apic->lapic_timer.target_expiration,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) apic->lapic_timer.period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) delta = ktime_sub(apic->lapic_timer.target_expiration, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) nsec_to_cycles(apic->vcpu, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) static void start_sw_period(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (!apic->lapic_timer.period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (ktime_after(ktime_get(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) apic->lapic_timer.target_expiration)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) apic_timer_expired(apic, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (apic_lvtt_oneshot(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) advance_periodic_target_expiration(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) hrtimer_start(&apic->lapic_timer.timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) apic->lapic_timer.target_expiration,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) HRTIMER_MODE_ABS_HARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (!lapic_in_kernel(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) static void cancel_hv_timer(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) WARN_ON(preemptible());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) WARN_ON(!apic->lapic_timer.hv_timer_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) kvm_x86_ops.cancel_hv_timer(apic->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) apic->lapic_timer.hv_timer_in_use = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) static bool start_hv_timer(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) struct kvm_timer *ktimer = &apic->lapic_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) struct kvm_vcpu *vcpu = apic->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) bool expired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) WARN_ON(preemptible());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (!kvm_can_use_hv_timer(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (!ktimer->tscdeadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) ktimer->hv_timer_in_use = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) hrtimer_cancel(&ktimer->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * To simplify handling the periodic timer, leave the hv timer running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * even if the deadline timer has expired, i.e. rely on the resulting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * VM-Exit to recompute the periodic timer's target expiration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (!apic_lvtt_period(apic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) * Cancel the hv timer if the sw timer fired while the hv timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * was being programmed, or if the hv timer itself expired.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (atomic_read(&ktimer->pending)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) cancel_hv_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) } else if (expired) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) apic_timer_expired(apic, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) cancel_hv_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) static void start_sw_timer(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) struct kvm_timer *ktimer = &apic->lapic_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) WARN_ON(preemptible());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (apic->lapic_timer.hv_timer_in_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) cancel_hv_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) start_sw_period(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) else if (apic_lvtt_tscdeadline(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) start_sw_tscdeadline(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) static void restart_apic_timer(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
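	/*
	 * Prefer the hypervisor-assisted timer (e.g. the VMX preemption
	 * timer); fall back to a software hrtimer if it can't be used.
	 */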
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (!start_hv_timer(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) start_sw_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) /* If the preempt notifier has already run, it also called apic_timer_expired */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (!apic->lapic_timer.hv_timer_in_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) WARN_ON(rcuwait_active(&vcpu->wait));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) apic_timer_expired(apic, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) cancel_hv_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) advance_periodic_target_expiration(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) restart_apic_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) restart_apic_timer(vcpu->arch.apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) /* Possibly the TSC deadline timer is not enabled yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (apic->lapic_timer.hv_timer_in_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) start_sw_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) WARN_ON(!apic->lapic_timer.hv_timer_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) restart_apic_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) atomic_set(&apic->lapic_timer.pending, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) && !set_target_expiration(apic, count_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) restart_apic_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) static void start_apic_timer(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) __start_apic_timer(apic, APIC_TMICT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode)
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
		else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) trace_kvm_apic_write(reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) case APIC_ID: /* Local APIC ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) if (!apic_x2apic_mode(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) kvm_apic_set_xapic_id(apic, val >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) case APIC_TASKPRI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) report_tpr_access(apic, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) apic_set_tpr(apic, val & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) case APIC_EOI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) apic_set_eoi(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) case APIC_LDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (!apic_x2apic_mode(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) case APIC_DFR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (!apic_x2apic_mode(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) case APIC_SPIV: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) u32 mask = 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) mask |= APIC_SPIV_DIRECTED_EOI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) apic_set_spiv(apic, val & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (!(val & APIC_SPIV_APIC_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) u32 lvt_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) lvt_val = kvm_lapic_get_reg(apic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) APIC_LVTT + 0x10 * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) lvt_val | APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) apic_update_lvtt(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) atomic_set(&apic->lapic_timer.pending, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) case APIC_ICR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) /* No delay here, so we always clear the pending bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) val &= ~(1 << 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) kvm_lapic_set_reg(apic, APIC_ICR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) case APIC_ICR2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) if (!apic_x2apic_mode(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) val &= 0xff000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) kvm_lapic_set_reg(apic, APIC_ICR2, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) case APIC_LVT0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) apic_manage_nmi_watchdog(apic, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) case APIC_LVTTHMR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) case APIC_LVTPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) case APIC_LVT1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) case APIC_LVTERR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) /* TODO: Check vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) u32 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (!kvm_apic_sw_enabled(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) val |= APIC_LVT_MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) size = ARRAY_SIZE(apic_lvt_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) index = array_index_nospec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) (reg - APIC_LVTT) >> 4, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) val &= apic_lvt_mask[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) kvm_lapic_set_reg(apic, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) case APIC_LVTT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (!kvm_apic_sw_enabled(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) val |= APIC_LVT_MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) kvm_lapic_set_reg(apic, APIC_LVTT, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) apic_update_lvtt(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) case APIC_TMICT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (apic_lvtt_tscdeadline(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) hrtimer_cancel(&apic->lapic_timer.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) kvm_lapic_set_reg(apic, APIC_TMICT, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) start_apic_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) case APIC_TDCR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) uint32_t old_divisor = apic->divide_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
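		/*
		 * Only bits 0, 1 and 3 of the divide configuration
		 * register are defined; bit 2 is reserved, hence the 0xb
		 * mask.
		 */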
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) update_divide_count(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (apic->divide_count != old_divisor &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) apic->lapic_timer.period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) hrtimer_cancel(&apic->lapic_timer.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) update_target_expiration(apic, old_divisor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) restart_apic_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) case APIC_ESR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (apic_x2apic_mode(apic) && val != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) case APIC_SELF_IPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) if (apic_x2apic_mode(apic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) kvm_lapic_reg_write(apic, APIC_ICR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
		} else {
			ret = 1;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) kvm_recalculate_apic_map(apic->vcpu->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) gpa_t address, int len, const void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) struct kvm_lapic *apic = to_lapic(this);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) unsigned int offset = address - apic->base_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) if (!apic_mmio_in_range(apic, address))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
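	/*
	 * The MMIO range is a "hole" when the APIC is hardware-disabled or
	 * in x2APIC mode.  If the LAPIC_MMIO_HOLE quirk is disabled, punt
	 * the access to userspace; otherwise silently drop the write.
	 */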
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (!kvm_check_has_quirk(vcpu->kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
	/*
	 * APIC registers must be aligned on a 128-bit boundary, and
	 * 32/64/128-bit registers must be accessed via 32-bit reads and
	 * writes.  See SDM section 8.4.1.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) if (len != 4 || (offset & 0xf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) val = *(u32*)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) kvm_lapic_reg_write(apic, offset & 0xff0, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
/* Emulate an APIC write that took a trap-style exit, i.e. the data has already landed in the virtual-APIC page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
	/* Hardware has already done the access checks and instruction decode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) offset &= 0xff0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
	/* TODO: optimize to emulate only the side effects, without one more register write. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) void kvm_free_lapic(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (!vcpu->arch.apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) hrtimer_cancel(&apic->lapic_timer.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) static_key_slow_dec_deferred(&apic_hw_disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (!apic->sw_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) static_key_slow_dec_deferred(&apic_sw_disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (apic->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) free_page((unsigned long)apic->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) kfree(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) *----------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) * LAPIC interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) *----------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) return apic->lapic_timer.tscdeadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) hrtimer_cancel(&apic->lapic_timer.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) apic->lapic_timer.tscdeadline = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) start_apic_timer(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) u64 tpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) return (tpr & 0xf0) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) u64 old_value = vcpu->arch.apic_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (!apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) value |= MSR_IA32_APICBASE_BSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) vcpu->arch.apic_base = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) kvm_update_cpuid_runtime(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if (!apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) /* update jump label if enable bit changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if (value & MSR_IA32_APICBASE_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) static_key_slow_dec_deferred(&apic_hw_disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) static_key_slow_inc(&apic_hw_disabled.key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) kvm_x86_ops.set_virtual_apic_mode(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) apic->base_address = apic->vcpu->arch.apic_base &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) MSR_IA32_APICBASE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) if ((value & MSR_IA32_APICBASE_ENABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) apic->base_address != APIC_DEFAULT_PHYS_BASE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) pr_warn_once("APIC base relocation is unsupported by KVM");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (vcpu->arch.apicv_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) /* irr_pending is always true when apicv is activated. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) apic->irr_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) apic->isr_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) apic->irr_pending = (apic_search_irr(apic) != -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) apic->isr_count = count_vectors(apic->regs + APIC_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) if (!apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) /* Stop the timer in case it's a reset to an active apic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) hrtimer_cancel(&apic->lapic_timer.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) if (!init_event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) MSR_IA32_APICBASE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) kvm_apic_set_version(apic->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) for (i = 0; i < KVM_APIC_LVT_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) apic_update_lvtt(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (kvm_vcpu_is_reset_bsp(vcpu) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) kvm_lapic_set_reg(apic, APIC_LVT0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) kvm_apic_set_dfr(apic, 0xffffffffU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) apic_set_spiv(apic, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (!apic_x2apic_mode(apic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) kvm_apic_set_ldr(apic, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) kvm_lapic_set_reg(apic, APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) kvm_lapic_set_reg(apic, APIC_ICR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) kvm_lapic_set_reg(apic, APIC_ICR2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) kvm_lapic_set_reg(apic, APIC_TDCR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) kvm_lapic_set_reg(apic, APIC_TMICT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) kvm_apic_update_apicv(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) apic->highest_isr_cache = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) update_divide_count(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) atomic_set(&apic->lapic_timer.pending, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) if (kvm_vcpu_is_bsp(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) kvm_lapic_set_base(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) vcpu->arch.pv_eoi.msr_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) apic_update_ppr(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) if (vcpu->arch.apicv_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) kvm_x86_ops.apicv_post_state_restore(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) kvm_x86_ops.hwapic_irr_update(vcpu, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) kvm_x86_ops.hwapic_isr_update(vcpu, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) vcpu->arch.apic_arb_prio = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) vcpu->arch.apic_attention = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) kvm_recalculate_apic_map(vcpu->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) *----------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) * timer interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) *----------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) static bool lapic_is_periodic(struct kvm_lapic *apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return apic_lvtt_period(apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) int apic_has_pending_timer(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) struct kvm_lapic *apic = vcpu->arch.apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) return atomic_read(&apic->lapic_timer.pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					 NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read = apic_mmio_read,
	.write = apic_mmio_write,
};

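/*
 * hrtimer callback for the emulated LAPIC timer.  A periodic timer has
 * its expiration pushed forward by one period and is restarted;
 * one-shot and TSC-deadline timers simply expire.
 */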
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic, true);

	if (lapic_is_periodic(apic)) {
		advance_periodic_target_expiration(apic);
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	}

	return HRTIMER_NORESTART;
}

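/*
 * Passing timer_advance_ns == -1 selects adaptive tuning of the timer
 * advance, starting from LAPIC_TIMER_ADVANCE_NS_INIT; any other value
 * is used as-is and the dynamic tuning stays disabled.
 */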
int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!apic->regs) {
		pr_err("malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	apic->lapic_timer.timer.function = apic_timer_fn;
	if (timer_advance_ns == -1) {
		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
		lapic_timer_advance_dynamic = true;
	} else {
		apic->lapic_timer.timer_advance_ns = timer_advance_ns;
		lapic_timer_advance_dynamic = false;
	}

	/*
	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
	 * thinking that APIC state has changed.
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
	vcpu->arch.apic = NULL;
nomem:
	return -ENOMEM;
}

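/*
 * Return the highest-priority vector that can be injected right now,
 * or -1 if nothing is pending above the current processor priority.
 */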
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (!kvm_apic_present(vcpu))
		return -1;

	__apic_update_ppr(apic, &ppr);
	return apic_has_interrupt_for_ppr(apic, ppr);
}
EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		return 1;
	return 0;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_inject_pending_timer_irqs(apic);
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode. Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_clear_irr(vector, apic);
	if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
		/*
		 * For auto-EOI interrupts, there might be another pending
		 * interrupt above PPR, so check whether to raise another
		 * KVM_REQ_EVENT.
		 */
		apic_update_ppr(apic);
	} else {
		/*
		 * For normal interrupts, PPR has been raised and there cannot
		 * be a higher-priority pending interrupt, except if there was
		 * a concurrent interrupt injection, but that would have
		 * triggered KVM_REQ_EVENT already.
		 */
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
	}

	return vector;
}

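/*
 * Userspace always supplies the APIC page in xAPIC layout.  In x2APIC
 * mode the ID register holds the full 32-bit id while the xAPIC
 * register keeps the 8-bit id in bits 31:24, so the id is converted on
 * the way in (set) and out (get); with the stricter x2apic_format the
 * id must already equal vcpu_id and is left untouched.
 */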
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
				struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/* In x2APIC mode, the LDR is fixed and based on the id */
		if (set)
			*ldr = kvm_apic_calc_x2apic_ldr(*id);
	}

	return 0;
}

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	/*
	 * Get calculated timer current count for remaining timer period (if
	 * any) and store it in the returned register set.
	 */
	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
			    __apic_read(vcpu->arch.apic, APIC_TMCCT));

	return kvm_apic_state_fixup(vcpu, s, false);
}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r) {
		kvm_recalculate_apic_map(vcpu->kvm);
		return r;
	}
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	kvm_recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	__start_apic_timer(apic, APIC_TMCCT);
	kvm_apic_update_apicv(vcpu);
	apic->highest_isr_cache = -1;
	if (vcpu->arch.apicv_active) {
		kvm_x86_ops.apicv_post_state_restore(vcpu);
		kvm_x86_ops.hwapic_irr_update(vcpu,
					      apic_find_highest_irr(apic));
		kvm_x86_ops.hwapic_isr_update(vcpu,
					      apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}

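/*
 * Move a still-armed LAPIC timer along with its vCPU to the current
 * CPU.  When KVM can post timer interrupts directly to the guest, the
 * timer does not need to follow the vCPU, so nothing is done.
 */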
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu) ||
	    kvm_can_post_timer_interrupt(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether the guest triggered PV EOI since the
 * last entry. If yes, set EOI on the guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear the pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from a performance point of view,
	 * it makes sure PV EOI is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
				      struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
			       sizeof(u32));
}

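/*
 * A non-zero vapic_addr registers the guest-physical page used for TPR
 * shadowing; a zero address disables vAPIC synchronisation again.
 */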
int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.apic->vapic_cache,
					      vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}

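/*
 * x2APIC MSRs map linearly onto the xAPIC register layout:
 * reg = (msr - APIC_BASE_MSR) << 4, so e.g. MSR 0x808 (TPR) maps to
 * register offset 0x80.  ICR is a single 64-bit MSR in x2APIC mode;
 * its high half is stored via APIC_ICR2 before the low half triggers
 * the command, and ICR2 itself is not a valid x2APIC MSR.
 */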
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_ICR2)
		return 1;
	/* if this is an ICR write, set the destination (ICR2) before the command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR || reg == APIC_ICR2)
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu))
		return 1;

	/* if this is an ICR write, set the destination (ICR2) before the command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!lapic_in_kernel(vcpu))
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

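/*
 * MSR_KVM_PV_EOI_EN: bit 0 (KVM_MSR_ENABLED) turns the feature on, the
 * remaining bits hold the 4-byte aligned guest address of the PV EOI
 * flag.
 */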
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;

	if (addr == ghc->gpa && len <= ghc->len)
		new_len = ghc->len;
	else
		new_len = len;

	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
}

void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	unsigned long pe;

	if (!lapic_in_kernel(vcpu) || !apic->pending_events)
		return;

	/*
	 * INITs are latched while CPU is in specific states
	 * (SMM, VMX non-root mode, SVM with GIF=0).
	 * Because a CPU cannot be in these states immediately
	 * after it has processed an INIT signal (and thus in
	 * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
	 * and leave the INIT pending.
	 */
	if (kvm_vcpu_latch_init(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return;
	}

	pe = xchg(&apic->pending_events, 0);
	if (test_bit(KVM_APIC_INIT, &pe)) {
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		/* evaluate pending_events before reading the vector */
		smp_rmb();
		sipi_vector = apic->sipi_vector;
		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
}

void kvm_lapic_init(void)
{
	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&apic_hw_disabled, HZ);
	jump_label_rate_limit(&apic_sw_disabled, HZ);
}

void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	static_key_deferred_flush(&apic_sw_disabled);
}