/*
 * VMware Detection code.
 *
 * Copyright (C) 2008, VMware, Inc.
 * Author : Alok N Kataria <akataria@vmware.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/clocksource.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
#include <asm/timer.h>
#include <asm/apic.h>
#include <asm/vmware.h>
#include <asm/svm.h>

#undef pr_fmt
#define pr_fmt(fmt)	"vmware: " fmt

#define CPUID_VMWARE_INFO_LEAF			0x40000000
#define CPUID_VMWARE_FEATURES_LEAF		0x40000010
#define CPUID_VMWARE_FEATURES_ECX_VMMCALL	BIT(0)
#define CPUID_VMWARE_FEATURES_ECX_VMCALL	BIT(1)

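/* The hypercall magic value; its bytes spell "VMXh" in ASCII. */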
#define VMWARE_HYPERVISOR_MAGIC		0x564D5868

#define VMWARE_CMD_GETVERSION		10
#define VMWARE_CMD_GETHZ		45
#define VMWARE_CMD_GETVCPU_INFO		68
#define VMWARE_CMD_LEGACY_X2APIC	3
#define VMWARE_CMD_VCPU_RESERVED	31
#define VMWARE_CMD_STEALCLOCK		91

#define STEALCLOCK_NOT_AVAILABLE	(-1)
#define STEALCLOCK_DISABLED		0
#define STEALCLOCK_ENABLED		1

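/*
 * Legacy "backdoor" hypercall: an IN from the VMware I/O port (defined as
 * VMWARE_HYPERVISOR_PORT in <asm/vmware.h>) with the magic in %eax and the
 * command number in %ecx; the hypervisor returns its results in
 * %eax/%ebx/%ecx/%edx.
 */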
#define VMWARE_PORT(cmd, eax, ebx, ecx, edx)				\
	__asm__("inl (%%dx), %%eax" :					\
		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
		"a"(VMWARE_HYPERVISOR_MAGIC),				\
		"c"(VMWARE_CMD_##cmd),					\
		"d"(VMWARE_HYPERVISOR_PORT), "b"(UINT_MAX) :		\
		"memory")

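/*
 * Same register convention as VMWARE_PORT, but issued via the hardware
 * VMCALL (Intel VT-x) or VMMCALL (AMD SVM) instruction instead of a port
 * access, used when the hypervisor advertises support for it via
 * CPUID_VMWARE_FEATURES_LEAF.
 */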
#define VMWARE_VMCALL(cmd, eax, ebx, ecx, edx)				\
	__asm__("vmcall" :						\
		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
		"a"(VMWARE_HYPERVISOR_MAGIC),				\
		"c"(VMWARE_CMD_##cmd),					\
		"d"(0), "b"(UINT_MAX) :					\
		"memory")

#define VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx)				\
	__asm__("vmmcall" :						\
		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
		"a"(VMWARE_HYPERVISOR_MAGIC),				\
		"c"(VMWARE_CMD_##cmd),					\
		"d"(0), "b"(UINT_MAX) :					\
		"memory")

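/*
 * Dispatch a hypercall according to the mode detected at boot: VMCALL or
 * VMMCALL when advertised via CPUID, otherwise fall back to the legacy
 * I/O port interface.
 */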
#define VMWARE_CMD(cmd, eax, ebx, ecx, edx) do {		\
	switch (vmware_hypercall_mode) {			\
	case CPUID_VMWARE_FEATURES_ECX_VMCALL:			\
		VMWARE_VMCALL(cmd, eax, ebx, ecx, edx);		\
		break;						\
	case CPUID_VMWARE_FEATURES_ECX_VMMCALL:			\
		VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx);	\
		break;						\
	default:						\
		VMWARE_PORT(cmd, eax, ebx, ecx, edx);		\
		break;						\
	}							\
} while (0)

struct vmware_steal_time {
	union {
		uint64_t clock;	/* stolen time counter in units of vtsc */
		struct {
			/* only for little-endian */
			uint32_t clock_low;
			uint32_t clock_high;
		};
	};
	uint64_t reserved[7];
};

static unsigned long vmware_tsc_khz __ro_after_init;
static u8 vmware_hypercall_mode     __ro_after_init;

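/*
 * Issue GETVERSION through the backdoor; we are running on VMware when
 * %eax holds a valid version and %ebx echoes the hypervisor magic.
 */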
static inline int __vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	VMWARE_CMD(GETVERSION, eax, ebx, ecx, edx);
	return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
}

static unsigned long vmware_get_tsc_khz(void)
{
	return vmware_tsc_khz;
}

#ifdef CONFIG_PARAVIRT
static struct cyc2ns_data vmware_cyc2ns __ro_after_init;
static bool vmw_sched_clock __initdata = true;
static DEFINE_PER_CPU_DECRYPTED(struct vmware_steal_time, vmw_steal_time) __aligned(64);
static bool has_steal_clock;
static bool steal_acc __initdata = true; /* steal time accounting */

static __init int setup_vmw_sched_clock(char *s)
{
	vmw_sched_clock = false;
	return 0;
}
early_param("no-vmw-sched-clock", setup_vmw_sched_clock);

static __init int parse_no_stealacc(char *arg)
{
	steal_acc = false;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

static unsigned long long notrace vmware_sched_clock(void)
{
	unsigned long long ns;

	ns = mul_u64_u32_shr(rdtsc(), vmware_cyc2ns.cyc2ns_mul,
			     vmware_cyc2ns.cyc2ns_shift);
	ns -= vmware_cyc2ns.cyc2ns_offset;
	return ns;
}

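/*
 * Precompute the mult/shift pair used to convert TSC cycles to nanoseconds
 * and record the current TSC as an offset, so that sched_clock() starts
 * close to zero at setup time.
 */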
static void __init vmware_cyc2ns_setup(void)
{
	struct cyc2ns_data *d = &vmware_cyc2ns;
	unsigned long long tsc_now = rdtsc();

	clocks_calc_mult_shift(&d->cyc2ns_mul, &d->cyc2ns_shift,
			       vmware_tsc_khz, NSEC_PER_MSEC, 0);
	d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
					   d->cyc2ns_shift);

	pr_info("using clock offset of %llu ns\n", d->cyc2ns_offset);
}

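/*
 * Issue the STEALCLOCK hypercall with arg1/arg2 passed in %esi/%edi and
 * return the STEALCLOCK_* status from %eax.  Enabling passes the physical
 * address of the per-cpu steal time area split into high (arg1) and low
 * (arg2) halves; disabling passes (0, 1).
 */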
static int vmware_cmd_stealclock(uint32_t arg1, uint32_t arg2)
{
	uint32_t result, info;

	asm volatile (VMWARE_HYPERCALL :
		"=a"(result),
		"=c"(info) :
		"a"(VMWARE_HYPERVISOR_MAGIC),
		"b"(0),
		"c"(VMWARE_CMD_STEALCLOCK),
		"d"(0),
		"S"(arg1),
		"D"(arg2) :
		"memory");
	return result;
}

static bool stealclock_enable(phys_addr_t pa)
{
	return vmware_cmd_stealclock(upper_32_bits(pa),
				     lower_32_bits(pa)) == STEALCLOCK_ENABLED;
}

static int __stealclock_disable(void)
{
	return vmware_cmd_stealclock(0, 1);
}

static void stealclock_disable(void)
{
	__stealclock_disable();
}

static bool vmware_is_stealclock_available(void)
{
	return __stealclock_disable() != STEALCLOCK_NOT_AVAILABLE;
}

/**
 * vmware_steal_clock() - read the per-cpu steal clock
 * @cpu: the cpu number whose steal clock we want to read
 *
 * The function reads the steal clock if we are on a 64-bit system, otherwise
 * reads it in parts, checking that the high part didn't change in the
 * meantime.
 *
 * Return:
 *      The steal clock reading in ns.
 */
static uint64_t vmware_steal_clock(int cpu)
{
	struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);
	uint64_t clock;

	if (IS_ENABLED(CONFIG_64BIT))
		clock = READ_ONCE(steal->clock);
	else {
		uint32_t initial_high, low, high;

		do {
			initial_high = READ_ONCE(steal->clock_high);
			/* Do not reorder initial_high and high readings */
			virt_rmb();
			low = READ_ONCE(steal->clock_low);
			/* Keep low reading in between */
			virt_rmb();
			high = READ_ONCE(steal->clock_high);
		} while (initial_high != high);

		clock = ((uint64_t)high << 32) | low;
	}

	return mul_u64_u32_shr(clock, vmware_cyc2ns.cyc2ns_mul,
			       vmware_cyc2ns.cyc2ns_shift);
}

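/*
 * Hand the physical address of this CPU's steal time area to the
 * hypervisor; if the hypervisor refuses, steal clock accounting is
 * disabled.
 */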
static void vmware_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct vmware_steal_time *st = &per_cpu(vmw_steal_time, cpu);

	if (!has_steal_clock)
		return;

	if (!stealclock_enable(slow_virt_to_phys(st))) {
		has_steal_clock = false;
		return;
	}

	pr_info("vmware-stealtime: cpu %d, pa %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static void vmware_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	stealclock_disable();
}

static void vmware_guest_cpu_init(void)
{
	if (has_steal_clock)
		vmware_register_steal_time();
}

static void vmware_pv_guest_cpu_reboot(void *unused)
{
	vmware_disable_steal_time();
}

static int vmware_pv_reboot_notify(struct notifier_block *nb,
				   unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(vmware_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block vmware_pv_reboot_nb = {
	.notifier_call = vmware_pv_reboot_notify,
};

#ifdef CONFIG_SMP
static void __init vmware_smp_prepare_boot_cpu(void)
{
	vmware_guest_cpu_init();
	native_smp_prepare_boot_cpu();
}

static int vmware_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	vmware_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int vmware_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	vmware_disable_steal_time();
	local_irq_enable();
	return 0;
}
#endif

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

static void __init vmware_paravirt_ops_setup(void)
{
	pv_info.name = "VMware hypervisor";
	pv_ops.cpu.io_delay = paravirt_nop;

	if (vmware_tsc_khz == 0)
		return;

	vmware_cyc2ns_setup();

	if (vmw_sched_clock)
		pv_ops.time.sched_clock = vmware_sched_clock;

	if (vmware_is_stealclock_available()) {
		has_steal_clock = true;
		pv_ops.time.steal_clock = vmware_steal_clock;

		/* We use reboot notifier only to disable steal clock */
		register_reboot_notifier(&vmware_pv_reboot_nb);

#ifdef CONFIG_SMP
		smp_ops.smp_prepare_boot_cpu =
			vmware_smp_prepare_boot_cpu;
		if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					      "x86/vmware:online",
					      vmware_cpu_online,
					      vmware_cpu_down_prepare) < 0)
			pr_err("vmware_guest: Failed to install cpu hotplug callbacks\n");
#else
		vmware_guest_cpu_init();
#endif
	}
}
#else
#define vmware_paravirt_ops_setup() do {} while (0)
#endif

/*
 * VMware hypervisor takes care of exporting a reliable TSC to the guest.
 * Still, due to timing difference when running on virtual cpus, the TSC can
 * be marked as unstable in some cases. For example, the TSC sync check at
 * bootup can fail due to a marginal offset between vcpus' TSCs (though the
 * TSCs do not drift from each other).  Also, the ACPI PM timer clocksource
 * is not suitable as a watchdog when running on a hypervisor because the
 * kernel may miss a wrap of the counter if the vcpu is descheduled for a
 * long time. To skip these checks at runtime we set these capability bits,
 * so that the kernel could just trust the hypervisor with providing a
 * reliable virtual TSC that is suitable for timekeeping.
 */
static void __init vmware_set_capabilities(void)
{
	setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
	if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMCALL)
		setup_force_cpu_cap(X86_FEATURE_VMCALL);
	else if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL)
		setup_force_cpu_cap(X86_FEATURE_VMW_VMMCALL);
}

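/*
 * GETHZ returns the TSC frequency in Hz as a 64-bit value in %ebx:%eax
 * (%ebx == UINT_MAX means "not available") and the host bus frequency in
 * Hz in %ecx, which lets us skip LAPIC timer calibration.
 */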
static void __init vmware_platform_setup(void)
{
	uint32_t eax, ebx, ecx, edx;
	uint64_t lpj, tsc_khz;

	VMWARE_CMD(GETHZ, eax, ebx, ecx, edx);

	if (ebx != UINT_MAX) {
		lpj = tsc_khz = eax | (((uint64_t)ebx) << 32);
		do_div(tsc_khz, 1000);
		WARN_ON(tsc_khz >> 32);
		pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
			(unsigned long) tsc_khz / 1000,
			(unsigned long) tsc_khz % 1000);

		if (!preset_lpj) {
			do_div(lpj, HZ);
			preset_lpj = lpj;
		}

		vmware_tsc_khz = tsc_khz;
		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
		x86_platform.calibrate_cpu = vmware_get_tsc_khz;

#ifdef CONFIG_X86_LOCAL_APIC
		/* Skip lapic calibration since we know the bus frequency. */
		lapic_timer_period = ecx / HZ;
		pr_info("Host bus clock speed read from hypervisor : %u Hz\n",
			ecx);
#endif
	} else {
		pr_warn("Failed to get TSC freq from the hypervisor\n");
	}

	vmware_paravirt_ops_setup();

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif

	vmware_set_capabilities();
}

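/*
 * Ask CPUID_VMWARE_FEATURES_LEAF which hypercall instruction the hypervisor
 * supports; the returned ECX bits select VMCALL, VMMCALL or (if neither is
 * set) the legacy port interface.
 */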
static u8 __init vmware_select_hypercall(void)
{
	int eax, ebx, ecx, edx;

	cpuid(CPUID_VMWARE_FEATURES_LEAF, &eax, &ebx, &ecx, &edx);
	return (ecx & (CPUID_VMWARE_FEATURES_ECX_VMMCALL |
		       CPUID_VMWARE_FEATURES_ECX_VMCALL));
}

/*
 * While checking the dmi string information, just checking the product
 * serial key should be enough, as this will always have a VMware
 * specific string when running under VMware hypervisor.
 * If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode
 * intentionally defaults to 0.
 */
static uint32_t __init vmware_platform(void)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		unsigned int eax;
		unsigned int hyper_vendor_id[3];

		cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
		      &hyper_vendor_id[1], &hyper_vendor_id[2]);
		if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) {
			if (eax >= CPUID_VMWARE_FEATURES_LEAF)
				vmware_hypercall_mode =
					vmware_select_hypercall();

			pr_info("hypercall mode: 0x%02x\n",
				(unsigned int) vmware_hypercall_mode);

			return CPUID_VMWARE_INFO_LEAF;
		}
	} else if (dmi_available && dmi_name_in_serial("VMware") &&
		   __vmware_platform())
		return 1;

	return 0;
}

/* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
static bool __init vmware_legacy_x2apic_available(void)
{
	uint32_t eax, ebx, ecx, edx;
	VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
	return (eax & (1 << VMWARE_CMD_VCPU_RESERVED)) == 0 &&
	       (eax & (1 << VMWARE_CMD_LEGACY_X2APIC)) != 0;
}

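/*
 * Under SEV-ES the hypervisor cannot read guest registers directly, so the
 * #VC handler copies the hypercall arguments into the shared GHCB before
 * exiting to the hypervisor and copies the results back afterwards.
 */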
#ifdef CONFIG_AMD_MEM_ENCRYPT
static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb,
					struct pt_regs *regs)
{
	/* Copy VMWARE specific Hypercall parameters to the GHCB */
	ghcb_set_rip(ghcb, regs->ip);
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
	ghcb_set_rdi(ghcb, regs->di);
	ghcb_set_rbp(ghcb, regs->bp);
}

static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	if (!(ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb) &&
	      ghcb_rsi_is_valid(ghcb) &&
	      ghcb_rdi_is_valid(ghcb) &&
	      ghcb_rbp_is_valid(ghcb)))
		return false;

	regs->bx = ghcb->save.rbx;
	regs->cx = ghcb->save.rcx;
	regs->dx = ghcb->save.rdx;
	regs->si = ghcb->save.rsi;
	regs->di = ghcb->save.rdi;
	regs->bp = ghcb->save.rbp;

	return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_vmware = {
	.name				= "VMware",
	.detect				= vmware_platform,
	.type				= X86_HYPER_VMWARE,
	.init.init_platform		= vmware_platform_setup,
	.init.x2apic_available		= vmware_legacy_x2apic_available,
#ifdef CONFIG_AMD_MEM_ENCRYPT
	.runtime.sev_es_hcall_prepare	= vmware_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= vmware_sev_es_hcall_finish,
#endif
};