^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) // Copyright (C) 2019 Arm Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include <linux/arm-smccc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/kvm_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/sched/stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <asm/kvm_mmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <asm/pvclock-abi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <kvm/arm_hypercalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
/*
 * Update the guest-visible stolen-time record for @vcpu.
 *
 * Reads the current value of the stolen_time field from the guest page at
 * steal.base, adds the scheduler run_delay accumulated since the previous
 * update, and writes the sum back. The field is little-endian (per the
 * pvclock ABI in asm/pvclock-abi.h), hence the le64 conversions.
 *
 * No-op when the guest never enabled the feature (base == GPA_INVALID).
 */
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 base = vcpu->arch.steal.base;
	u64 last_steal = vcpu->arch.steal.last_steal;
	u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
	u64 steal = 0;
	int idx;

	/* Stolen-time reporting was never configured for this vCPU. */
	if (base == GPA_INVALID)
		return;

	/* Guest-memory accessors below require the kvm->srcu read lock. */
	idx = srcu_read_lock(&kvm->srcu);
	if (!kvm_get_guest(kvm, base + offset, steal)) {
		steal = le64_to_cpu(steal);
		/*
		 * Snapshot run_delay before computing the delta so that the
		 * amount added is exactly the window since the last snapshot
		 * (last_steal holds the previous snapshot's value).
		 */
		vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
		steal += vcpu->arch.steal.last_steal - last_steal;
		kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
	}
	srcu_read_unlock(&kvm->srcu, idx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) u32 feature = smccc_get_arg1(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) long val = SMCCC_RET_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) switch (feature) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) case ARM_SMCCC_HV_PV_TIME_FEATURES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) case ARM_SMCCC_HV_PV_TIME_ST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) if (vcpu->arch.steal.base != GPA_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) val = SMCCC_RET_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) struct pvclock_vcpu_stolen_time init_values = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) struct kvm *kvm = vcpu->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) u64 base = vcpu->arch.steal.base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) if (base == GPA_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) return base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * Start counting stolen time from the time the guest requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * the feature enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) vcpu->arch.steal.last_steal = current->sched_info.run_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) kvm_write_guest_lock(kvm, base, &init_values, sizeof(init_values));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) return base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) bool kvm_arm_pvtime_supported(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) return !!sched_info_on();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) u64 __user *user = (u64 __user *)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) struct kvm *kvm = vcpu->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) u64 ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) if (!kvm_arm_pvtime_supported() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) if (get_user(ipa, user))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) if (!IS_ALIGNED(ipa, 64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) if (vcpu->arch.steal.base != GPA_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) /* Check the address is in a valid memslot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) idx = srcu_read_lock(&kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) srcu_read_unlock(&kvm->srcu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) vcpu->arch.steal.base = ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) u64 __user *user = (u64 __user *)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) u64 ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) if (!kvm_arm_pvtime_supported() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) ipa = vcpu->arch.steal.base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) if (put_user(ipa, user))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) case KVM_ARM_VCPU_PVTIME_IPA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) if (kvm_arm_pvtime_supported())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) }