Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

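The listing below is the arm64 KVM guest register accessor, arch/arm64/kvm/guest.c, as carried in this tree: the KVM_GET_ONE_REG / KVM_SET_ONE_REG / KVM_GET_REG_LIST plumbing for the core, SVE, timer and firmware register groups.
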
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sigcontext.h>

#include "trace.h"

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
	VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
	VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
	VCPU_STAT("mmio_exit_user", mmio_exit_user),
	VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
	VCPU_STAT("exits", exits),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	{ NULL }
};

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
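
/*
 * KVM_REG_ARM_CORE_REG(name) is defined in the arm64 UAPI headers as
 * offsetof(struct kvm_regs, name) / sizeof(__u32), so the offset extracted
 * above indexes kvm_regs in 32-bit words: regs.regs[1], at byte offset 8,
 * decodes to offset 2, for example.
 */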

static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}

static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return NULL;

	if (KVM_REG_SIZE(reg->id) != size)
		return NULL;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
		off /= 2;
		return &vcpu->arch.ctxt.regs.regs[off];

	case KVM_REG_ARM_CORE_REG(regs.sp):
		return &vcpu->arch.ctxt.regs.sp;

	case KVM_REG_ARM_CORE_REG(regs.pc):
		return &vcpu->arch.ctxt.regs.pc;

	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return &vcpu->arch.ctxt.regs.pstate;

	case KVM_REG_ARM_CORE_REG(sp_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);

	case KVM_REG_ARM_CORE_REG(elr_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
		return &vcpu->arch.ctxt.spsr_abt;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
		return &vcpu->arch.ctxt.spsr_und;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
		return &vcpu->arch.ctxt.spsr_irq;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
		return &vcpu->arch.ctxt.spsr_fiq;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
		off /= 4;
		return &vcpu->arch.ctxt.fp_regs.vregs[off];

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return &vcpu->arch.ctxt.fp_regs.fpsr;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return &vcpu->arch.ctxt.fp_regs.fpcr;

	default:
		return NULL;
	}
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	void *addr;
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
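
/*
 * For reference, a minimal userspace sketch of reading X0 through this
 * path (vcpu_fd being a vcpu file descriptor from KVM_CREATE_VCPU) would
 * look roughly like:
 *
 *	__u64 x0;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(regs.regs[0]),
 *		.addr = (__u64)&x0,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */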

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp, *addr;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(addr, valp, KVM_REG_SIZE(reg->id));

	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i, nr_reg;

		switch (*vcpu_cpsr(vcpu)) {
		/*
		 * Either we are dealing with user mode, and only the
		 * first 15 registers (+ PC) must be narrowed to 32bit.
		 * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
		 */
		case PSR_AA32_MODE_USR:
		case PSR_AA32_MODE_SYS:
			nr_reg = 15;
			break;

		/*
		 * Otherwise, this is a privileged mode, and *all* the
		 * registers must be narrowed to 32bit.
		 */
		default:
			nr_reg = 31;
			break;
		}

		for (i = 0; i < nr_reg; i++)
			vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));

		*vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
	}
out:
	return err;
}

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
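
/*
 * The set of permitted vector lengths is exchanged with userspace as a
 * bitmap of vector quanta: one bit per 128-bit increment, with
 * SVE_VQ_MIN (== 1) mapping to bit 0 of vqs[0].  For instance, vq 1
 * (128-bit) and vq 2 (256-bit) land in word 0, bits 0 and 1.
 */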

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = vcpu_sve_max_vq(vcpu);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN.  So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}
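
/*
 * A rough userspace sketch of the intended ordering: KVM_REG_ARM64_SVE_VLS
 * may only be written after the vcpu is initialised with KVM_ARM_VCPU_SVE
 * and before it is finalized, e.g.:
 *
 *	__u64 vqs[KVM_ARM64_SVE_VLS_WORDS] = { ... };
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM64_SVE_VLS,
 *		.addr = (__u64)vqs,
 *	};
 *	int feature = KVM_ARM_VCPU_SVE;
 *
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 */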

#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)
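
/*
 * Put differently, the low bits of a KVM_REG_ARM64_SVE register ID
 * encode (slice, register number) as:
 *
 *	bits [4:0]  - slice index (currently always 0)
 *	bits [9:5]  - Z/P register number
 */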

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}
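
/*
 * Worked example (assuming the usual sigcontext layout macros): for a vcpu
 * whose maximum vector length is 512 bits (vq = 4), the Z1 slice maps to
 * koffset = 64 bytes into sve_state, klen = 64 bytes of real data and
 * upad = 192 bytes of trailing zero padding up to the fixed 2048-bit size
 * of the user-visible Z register.
 */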

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

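/*
 * With the single slice supported today, an SVE-enabled vcpu exposes
 * 32 Z-regs + 16 P-regs + FFR plus the VLS pseudo-register below,
 * i.e. 50 additional registers.
 */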
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	const unsigned int slices = vcpu_sve_slices(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	if (!vcpu_has_sve(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	/* Policed by KVM_GET_REG_LIST: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		+ 1; /* KVM_REG_ARM64_SVE_VLS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 				u64 __user *uindices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	const unsigned int slices = vcpu_sve_slices(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	unsigned int i, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	int num_regs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	if (!vcpu_has_sve(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	/* Policed by KVM_GET_REG_LIST: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	 * Enumerate this first, so that userspace can save/restore in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	 * the order reported by KVM_GET_REG_LIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	reg = KVM_REG_ARM64_SVE_VLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	if (put_user(reg, uindices++))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	++num_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	for (i = 0; i < slices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		for (n = 0; n < SVE_NUM_ZREGS; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 			if (put_user(reg, uindices++))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 				return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 			num_regs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		for (n = 0; n < SVE_NUM_PREGS; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 			reg = KVM_REG_ARM64_SVE_PREG(n, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 			if (put_user(reg, uindices++))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 				return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 			num_regs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		reg = KVM_REG_ARM64_SVE_FFR(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		if (put_user(reg, uindices++))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		num_regs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	return num_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685)  * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687)  * This is for all registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	unsigned long res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	res += num_core_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	res += num_sve_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	res += kvm_arm_num_sys_reg_descs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	res += kvm_arm_get_fw_num_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	res += NUM_TIMER_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703)  * kvm_arm_copy_reg_indices - get indices of all registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705)  * We do core registers right here, then we append system regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	ret = copy_core_reg_indices(vcpu, uindices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	uindices += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	ret = copy_sve_reg_indices(vcpu, uindices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	uindices += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	uindices += kvm_arm_get_fw_num_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	ret = copy_timer_indices(vcpu, uindices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	uindices += NUM_TIMER_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	/* We currently use nothing arch-specific in upper 32 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	if (is_timer_reg(reg->id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		return get_timer_reg(vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	return kvm_arm_sys_reg_get_reg(vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	/* We currently use nothing arch-specific in upper 32 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	if (is_timer_reg(reg->id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		return set_timer_reg(vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	return kvm_arm_sys_reg_set_reg(vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 				  struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 				  struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			      struct kvm_vcpu_events *events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (events->exception.serror_pending && events->exception.serror_has_esr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		events->exception.serror_esr = vcpu_get_vsesr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	 * We never return a pending ext_dabt here because we deliver it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	 * the virtual CPU directly when setting the event and it's no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	 * 'pending' at this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) }
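/*
 * Illustrative userspace sketch (not part of guest.c): the helper above
 * services KVM_GET_VCPU_EVENTS, which a VMM can use to check for a
 * pending SError, e.g. before saving vCPU state for migration.  Assumes
 * <linux/kvm.h>, <sys/ioctl.h>, <string.h> and an already-open vcpu_fd.
 */
static int example_serror_pending(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	/* Non-zero when HCR_EL2.VSE is set for this vCPU (see above). */
	return events.exception.serror_pending;
}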
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			      struct kvm_vcpu_events *events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	bool serror_pending = events->exception.serror_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	bool has_esr = events->exception.serror_has_esr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	bool ext_dabt_pending = events->exception.ext_dabt_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	if (serror_pending && has_esr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	} else if (serror_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		kvm_inject_vabt(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	if (ext_dabt_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) }
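/*
 * Illustrative userspace sketch (not part of guest.c): the helper above
 * backs KVM_SET_VCPU_EVENTS.  A VMM that cannot complete an emulated
 * memory access may ask KVM to inject a synchronous external data abort
 * instead (subject to KVM_CAP_ARM_INJECT_EXT_DABT).  Assumes
 * <linux/kvm.h>, <sys/ioctl.h>, <string.h> and an already-open vcpu_fd.
 */
static int example_inject_ext_dabt(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	events.exception.ext_dabt_pending = 1;

	/* Routed to kvm_inject_dabt() by __kvm_arm_vcpu_set_events(). */
	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}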
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) int __attribute_const__ kvm_target_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	unsigned long implementor = read_cpuid_implementor();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	unsigned long part_number = read_cpuid_part_number();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	switch (implementor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	case ARM_CPU_IMP_ARM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		switch (part_number) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		case ARM_CPU_PART_AEM_V8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			return KVM_ARM_TARGET_AEM_V8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		case ARM_CPU_PART_FOUNDATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			return KVM_ARM_TARGET_FOUNDATION_V8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		case ARM_CPU_PART_CORTEX_A53:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			return KVM_ARM_TARGET_CORTEX_A53;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		case ARM_CPU_PART_CORTEX_A57:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 			return KVM_ARM_TARGET_CORTEX_A57;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	case ARM_CPU_IMP_APM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		switch (part_number) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		case APM_CPU_PART_POTENZA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 			return KVM_ARM_TARGET_XGENE_POTENZA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	/* Return a default generic target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	return KVM_ARM_TARGET_GENERIC_V8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) }
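/*
 * Minimal sketch (not part of guest.c): the implementer and part number
 * matched above are fields of MIDR_EL1, with the implementer in bits
 * [31:24] and the part number in bits [15:4].  Decoding a raw MIDR value
 * under those assumptions:
 */
static unsigned int example_midr_implementor(unsigned int midr)
{
	return (midr >> 24) & 0xff;	/* 0x41 is ARM Ltd. */
}

static unsigned int example_midr_part_number(unsigned int midr)
{
	return (midr >> 4) & 0xfff;	/* 0xd03 is Cortex-A53 */
}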
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	int target = kvm_target_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	if (target < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	memset(init, 0, sizeof(*init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	 * For now, we don't return any features.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	 * In future, we might use features to return target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	 * specific features available for the preferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	 * target type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	init->target = (__u32)target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) }
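/*
 * Illustrative userspace sketch (not part of guest.c): the function above
 * backs the KVM_ARM_PREFERRED_TARGET vm ioctl.  The usual VMM flow is to
 * query the preferred target on the VM fd and pass the result to
 * KVM_ARM_VCPU_INIT on each vCPU fd.  Assumes <linux/kvm.h>,
 * <sys/ioctl.h> and already-open vm_fd/vcpu_fd descriptors.
 */
static int example_init_vcpu(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;

	/* init.features[] comes back zeroed; set bits here to opt in. */
	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}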
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 				  struct kvm_translation *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 			    KVM_GUESTDBG_USE_SW_BP | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			    KVM_GUESTDBG_USE_HW | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			    KVM_GUESTDBG_SINGLESTEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  * @vcpu:	the vCPU pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  * @dbg:	the ioctl data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  * This sets up and enables the VM for guest debugging. Userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  * passes in a control flag to enable different debug types and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  * potentially other architecture specific information in the rest of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  * the structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 					struct kvm_guest_debug *dbg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	trace_kvm_set_guest_debug(vcpu, dbg->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		vcpu->guest_debug = dbg->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		/* Hardware-assisted breakpoints and watchpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			vcpu->arch.external_debug_state = dbg->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		/* If not enabled, clear all flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		vcpu->guest_debug = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) }
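/*
 * Illustrative userspace sketch (not part of guest.c): the handler above
 * services the KVM_SET_GUEST_DEBUG vcpu ioctl.  Enabling software
 * breakpoints plus single-step could look like this.  Assumes
 * <linux/kvm.h>, <sys/ioctl.h>, <string.h> and an already-open vcpu_fd.
 */
static int example_enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP |
		      KVM_GUESTDBG_SINGLESTEP;

	/* Rejected with -EINVAL if control has bits outside the mask above. */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}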
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			       struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	case KVM_ARM_VCPU_PMU_V3_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	case KVM_ARM_VCPU_TIMER_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		ret = kvm_arm_timer_set_attr(vcpu, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	case KVM_ARM_VCPU_PVTIME_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) }
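/*
 * Illustrative userspace sketch (not part of guest.c): the groups handled
 * above are reached through the KVM_SET_DEVICE_ATTR vcpu ioctl.
 * Initialising the in-kernel PMUv3 (after the vCPU was created with the
 * KVM_ARM_VCPU_PMU_V3 feature) could look like this.  Assumes
 * <linux/kvm.h>, <sys/ioctl.h> and an already-open vcpu_fd.
 */
static int example_pmu_init(int vcpu_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr  = KVM_ARM_VCPU_PMU_V3_INIT,
	};

	/* Dispatched to kvm_arm_pmu_v3_set_attr() by the switch above. */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}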
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 			       struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	case KVM_ARM_VCPU_PMU_V3_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	case KVM_ARM_VCPU_TIMER_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		ret = kvm_arm_timer_get_attr(vcpu, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	case KVM_ARM_VCPU_PVTIME_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			       struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	case KVM_ARM_VCPU_PMU_V3_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	case KVM_ARM_VCPU_TIMER_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		ret = kvm_arm_timer_has_attr(vcpu, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	case KVM_ARM_VCPU_PVTIME_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
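/*
 * Illustrative userspace sketch (not part of guest.c): the has_attr
 * helper above answers the KVM_HAS_DEVICE_ATTR vcpu ioctl, which a VMM
 * typically uses to probe for an attribute before setting it.  Assumes
 * <linux/kvm.h>, <sys/ioctl.h> and an already-open vcpu_fd.
 */
static int example_has_pvtime(int vcpu_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr  = KVM_ARM_VCPU_PVTIME_IPA,
	};

	/* ioctl() returns 0 if supported and fails with ENXIO otherwise. */
	return ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
}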