// SPDX-License-Identifier: GPL-2.0

/*
 * Hyper-V specific spinlock code.
 *
 * Copyright (C) 2018, Intel, Inc.
 *
 * Author : Yi Sun <yi.y.sun@intel.com>
 */

#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/spinlock.h>

#include <asm/mshyperv.h>
#include <asm/paravirt.h>
#include <asm/apic.h>

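/*
 * PV spinlocks are enabled by default; the "hv_nopvspin" early
 * command-line parameter (see hv_parse_nopvspin() below) clears this
 * flag to fall back to native qspinlocks.
 */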
static bool __initdata hv_pvspin = true;

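/*
 * pv_ops.lock.kick callback: wake the target vCPU out of the 'idle'
 * state entered in hv_qlock_wait(). The IPI terminates that state even
 * when the target vCPU has interrupts disabled.
 */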
static void hv_qlock_kick(int cpu)
{
	apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
}

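/*
 * pv_ops.lock.wait callback: called by __pv_queued_spin_lock_slowpath()
 * when a vCPU has spun on the lock byte long enough. Park the vCPU
 * until it is kicked; returning early is always safe because the
 * slowpath re-checks the lock state after this callback returns.
 */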
static void hv_qlock_wait(u8 *byte, u8 val)
{
	unsigned long msr_val;
	unsigned long flags;

	/* Going idle from NMI context is not safe; just keep spinning. */
	if (in_nmi())
		return;

	/*
	 * Reading the HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that
	 * the vCPU can be put into 'idle' state. This 'idle' state is
	 * terminated by an IPI, usually from hv_qlock_kick(), even if
	 * interrupts are disabled on the vCPU.
	 *
	 * To prevent a race against the unlock path, interrupts must be
	 * disabled before accessing the HV_X64_MSR_GUEST_IDLE MSR.
	 * Otherwise, if the IPI from hv_qlock_kick() arrives between the
	 * lock value check and the rdmsrl(), the vCPU might be put into
	 * 'idle' state by the hypervisor and kept in that state for an
	 * unspecified amount of time.
	 */
	local_irq_save(flags);
	/*
	 * Only issue the rdmsrl() when the lock state has not changed. The
	 * value read back is irrelevant; the read itself is what triggers
	 * the transition to 'idle'.
	 */
	if (READ_ONCE(*byte) == val)
		rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
	local_irq_restore(flags);
}


/*
 * Hyper-V does not provide vCPU preemption information so far, so
 * unconditionally report "not preempted".
 */
__visible bool hv_vcpu_is_preempted(int vcpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);

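/*
 * Install the PV qspinlock hooks. This is skipped when PV spinlocks
 * were disabled on the command line, when no APIC is available, or when
 * the hypervisor does not advertise cluster IPIs
 * (HV_X64_CLUSTER_IPI_RECOMMENDED) or the guest idle MSR
 * (HV_MSR_GUEST_IDLE_AVAILABLE).
 */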
void __init hv_init_spinlocks(void)
{
	if (!hv_pvspin || !apic ||
	    !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
	    !(ms_hyperv.features & HV_MSR_GUEST_IDLE_AVAILABLE)) {
		pr_info("PV spinlocks disabled\n");
		return;
	}
	pr_info("PV spinlocks enabled\n");

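	/*
	 * Allocate the hash table that the PV unlock slowpath uses to look
	 * up the queue node of the vCPU to kick, then install the hooks.
	 */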
	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = hv_qlock_wait;
	pv_ops.lock.kick = hv_qlock_kick;
	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
}

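/* Handler for the "hv_nopvspin" early kernel command-line parameter. */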
static __init int hv_parse_nopvspin(char *arg)
{
	hv_pvspin = false;
	return 0;
}
early_param("hv_nopvspin", hv_parse_nopvspin);