Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5 Plus boards

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#ifdef CONFIG_CPU_LOONGSON64
#include "loongson_regs.h"
#endif

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

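/*
 * Note: sizeof(long) == 8 below is a compile-time constant, so on 32-bit
 * kernels the dead 64-bit EBase accessor branches are optimised away.
 */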
static inline long kvm_vz_read_gc0_ebase(void)
{
	if (sizeof(long) == 8 && cpu_has_ebase_wg)
		return read_gc0_ebase_64();
	else
		return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
	/*
	 * First write with WG=1 to write upper bits, then write again in case
	 * WG should be left at 0.
	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
	 */
	if (sizeof(long) == 8 &&
	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
		write_gc0_ebase_64(v | MIPS_EBASE_WG);
		write_gc0_ebase_64(v);
	} else {
		write_gc0_ebase(v | MIPS_EBASE_WG);
		write_gc0_ebase(v);
	}
}

/*
 * These Config bits may be writable by the guest:
 * Config:	[K23, KU] (!TLB), K0
 * Config1:	(none)
 * Config2:	[TU, SU] (impl)
 * Config3:	ISAOnExc
 * Config4:	FTLBPageSize
 * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */

static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
	/* no need to be exact */
	return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_ufr)
			mask |= MIPS_CONF5_UFR;
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
	}

	return mask;
}

static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
}

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:	M, [MT]
 * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:	M
 * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *		VInt, SP, CDMM, MT, SM, TL]
 * Config4:	M, [VTLBSizeExt, MMUSizeExt]
 * Config5:	MRP
 */

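/*
 * The *_guest_wrmask() helpers above cover bits the guest itself may change;
 * the *_user_wrmask() helpers below additionally allow the root-writable bits
 * listed above, presumably for use by KVM's register access interface (the
 * callers live elsewhere in this file).
 */
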
static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
		MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}

static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config6_guest_wrmask(vcpu) |
		LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
}

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
	/* VZ guest has already converted gva to gpa */
	return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

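/*
 * Queueing and dequeueing above only record the request in the
 * pending_exceptions{,_clr} bitmaps; the actual guest CP0/GuestCtl2 updates
 * are deferred to kvm_vz_irq_deliver_cb() and kvm_vz_irq_clear_cb() below,
 * which run in VCPU context where guest CP0 accesses are safe.
 */
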
static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
				   struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}

static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
				 u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		set_gc0_cause(C_TI);
		break;

	case MIPS_EXC_INT_IO_1:
	case MIPS_EXC_INT_IO_2:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		if (cpu_has_guestctl2)
			set_c0_guestctl2(irq);
		else
			set_gc0_cause(irq);
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions);
	return 1;
}

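/*
 * A note on the (irq << 14) tests below: irq is a Cause.IP-style mask, and
 * the corresponding GuestCtl2 hardware-clear (HC) bits sit 14 bits further
 * up, so (irq << 14) selects this interrupt's HC bit. When HC is in use the
 * hardware deasserts the virtual interrupt itself, so software must leave it
 * alone.
 */
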
static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			       u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		/*
		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
		 * supported or if not using GuestCtl2 Hardware Clear.
		 */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	case MIPS_EXC_INT_IO_1:
	case MIPS_EXC_INT_IO_2:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
	return 1;
}

/*
 * VZ guest timer handling.
 */

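/*
 * Two schemes are used here: the "hard" timer, where the guest reads the real
 * CP0_Count offset by CP0_GTOffset, and the "soft" timer, where the guest
 * timer is emulated in software with an hrtimer. kvm_vz_should_use_htimer()
 * below decides when the hard timer is usable.
 */
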
/**
 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	true if the VZ GTOffset & real guest CP0_Count should be used
 *		instead of software emulation of guest timer.
 *		false otherwise.
 */
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
	if (kvm_mips_count_disabled(vcpu))
		return false;

	/* Chosen frequency must match real frequency */
	if (mips_hpt_frequency != vcpu->arch.count_hz)
		return false;

	/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
	if (current_cpu_data.gtoffset_mask != 0xffffffff)
		return false;

	return true;
}

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer. The hard timer can be enabled
 * later.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
				   u32 cause)
{
	/*
	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
	 * after Guest CP0_Compare.
	 */
	write_c0_gtoffset(compare - read_c0_count());

	back_to_back_c0_hazard();
	write_gc0_cause(cause);
}

/**
 * _kvm_vz_restore_htimer() - Restore hard timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
 */
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
				   u32 compare, u32 cause)
{
	u32 start_count, after_count;
	ktime_t freeze_time;
	unsigned long flags;

	/*
	 * Freeze the soft-timer and sync the guest CP0_Count with it. We do
	 * this with interrupts disabled to avoid latency.
	 */
	local_irq_save(flags);
	freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
	write_c0_gtoffset(start_count - read_c0_count());
	local_irq_restore(flags);

	/* restore guest CP0_Cause, as TI may already be set */
	back_to_back_c0_hazard();
	write_gc0_cause(cause);

	/*
	 * The above sequence isn't atomic and would result in lost timer
	 * interrupts if we're not careful. Detect if a timer interrupt is due
	 * and assert it.
	 */
	back_to_back_c0_hazard();
	after_count = read_gc0_count();
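	/*
	 * Unsigned trick: the comparison below is true exactly when compare
	 * lies in the modulo-2^32 interval (start_count, after_count], i.e.
	 * when a timer interrupt became due during the sequence above.
	 */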
	if (after_count - start_count > compare - start_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

/**
 * kvm_vz_restore_timer() - Restore timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = kvm_read_sw_gc0_compare(cop0);
	cause = kvm_read_sw_gc0_cause(cop0);

	write_gc0_compare(compare);
	_kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_acquire_htimer() - Switch to hard timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore hard timer state on top of existing soft timer state if possible.
 *
 * Since hard timer won't remain active over preemption, preemption should be
 * disabled by the caller.
 */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0;

	gctl0 = read_c0_guestctl0();
	if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
		/* enable guest access to hard timer */
		write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);

		_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
				       read_gc0_cause());
	}
}

/**
 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
 * @vcpu:	Virtual CPU.
 * @out_compare: Pointer to write compare value to.
 * @out_cause:	Pointer to write cause value to.
 *
 * Save VZ guest timer state and switch to software emulation of guest CP0
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
				u32 *out_compare, u32 *out_cause)
{
	u32 cause, compare, before_count, end_count;
	ktime_t before_time;

	compare = read_gc0_compare();
	*out_compare = compare;

	before_time = ktime_get();

	/*
	 * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
	 * at which no pending timer interrupt is missing.
	 */
	before_count = read_gc0_count();
	back_to_back_c0_hazard();
	cause = read_gc0_cause();
	*out_cause = cause;

	/*
	 * Record a final CP0_Count which we will transfer to the soft-timer.
	 * This is recorded *after* saving CP0_Cause, so we don't get any timer
	 * interrupts from just after the final CP0_Count point.
	 */
	back_to_back_c0_hazard();
	end_count = read_gc0_count();

	/*
	 * The above sequence isn't atomic, so we could miss a timer interrupt
	 * between reading CP0_Cause and end_count. Detect and record any timer
	 * interrupt due between before_count and end_count.
	 */
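	/* Same modulo-2^32 window test as in _kvm_vz_restore_htimer() */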
	if (end_count - before_count > compare - before_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

	/*
	 * Restore soft-timer, ignoring a small amount of negative drift due to
	 * delay between freeze_hrtimer and setting CP0_GTOffset.
	 */
	kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state and switch to soft guest timer if hard timer was in
 * use.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 gctl0, compare, cause;

	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of hard timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* save hard timer state */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);
	} else {
		compare = read_gc0_compare();
		cause = read_gc0_cause();
	}

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
 * @vcpu:	Virtual CPU.
 *
 * Transfers the state of the hard guest timer to the soft guest timer, leaving
 * guest state intact so it can continue to be used with the soft timer.
 */
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0, compare, cause;

	preempt_disable();
	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* switch to soft timer */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);

		/* leave soft timer in usable state */
		_kvm_vz_restore_stimer(vcpu, compare, cause);
	}
	preempt_enable();
}

/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:	32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:	Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
	if (inst.spec3_format.opcode != spec3_op)
		return false;

	switch (inst.spec3_format.func) {
	case lwle_op:
	case lwre_op:
	case cachee_op:
	case sbe_op:
	case she_op:
	case sce_op:
	case swe_op:
	case swle_op:
	case swre_op:
	case prefe_op:
	case lbue_op:
	case lhue_op:
	case lbe_op:
	case lhe_op:
	case lle_op:
	case lwe_op:
		return true;
	default:
		return false;
	}
}

/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:	KVM VCPU state.
 * @am:		3-bit encoded access mode.
 * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:	Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
	u32 am_lookup;
	int err;

	/*
	 * Interpret access control mode. We assume address errors will already
	 * have been caught by the guest, leaving us with:
	 *      AM      UM  SM  KM  31..24 23..16
	 * UK    0 000          Unm   0      0
	 * MK    1 001          TLB   1
	 * MSK   2 010      TLB TLB   1
	 * MUSK  3 011  TLB TLB TLB   1
	 * MUSUK 4 100  TLB TLB Unm   0      1
	 * USK   5 101      Unm Unm   0      0
	 * -     6 110                0      0
	 * UUSK  7 111  Unm Unm Unm   0      0
	 *
	 * We shift a magic value by AM across the sign bit to find if always
	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
	 */
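	/*
	 * Worked example: for AM=4 (MUSUK), 0x70080000 << 4 = 0x00800000,
	 * positive, so not unconditionally mapped; shifted left a further 8
	 * bits it becomes 0x80000000, negative, so mappedness depends on
	 * kernel mode, matching the table above.
	 */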
	am_lookup = 0x70080000 << am;
	if ((s32)am_lookup < 0) {
		/*
		 * MK, MSK, MUSK
		 * Always TLB mapped, unless SegCtl.EU && ERL
		 */
		if (!eu || !(read_gc0_status() & ST0_ERL))
			return true;
	} else {
		am_lookup <<= 8;
		if ((s32)am_lookup < 0) {
			union mips_instruction inst;
			unsigned int status;
			u32 *opc;

			/*
			 * MUSUK
			 * TLB mapped if not in kernel mode
			 */
			status = read_gc0_status();
			if (!(status & (ST0_EXL | ST0_ERL)) &&
			    (status & ST0_KSU))
				return true;
			/*
			 * EVA access instructions in kernel
			 * mode access user address space.
			 */
			opc = (u32 *)vcpu->arch.pc;
			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
				opc += 1;
			err = kvm_get_badinstr(opc, vcpu, &inst.word);
			if (!err && is_eva_access(inst))
				return true;
		}
	}

	return false;
}

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			     unsigned long *gpa)
{
	u32 gva32 = gva;
	unsigned long segctl;

	if ((long)gva == (s32)gva32) {
		/* Handle canonical 32-bit virtual address */
		if (cpu_guest_has_segments) {
			unsigned long mask, pa;

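			/*
			 * Each 32-bit SegCtl register packs two 16-bit CFG
			 * fields, the odd-numbered one in the upper half,
			 * hence the >> 16 for the odd segments below.
			 */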
			switch (gva32 >> 29) {
			case 0:
			case 1: /* CFG5 (1GB) */
				segctl = read_gc0_segctl2() >> 16;
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 2:
			case 3: /* CFG4 (1GB) */
				segctl = read_gc0_segctl2();
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 4: /* CFG3 (512MB) */
				segctl = read_gc0_segctl1() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 5: /* CFG2 (512MB) */
				segctl = read_gc0_segctl1();
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 6: /* CFG1 (512MB) */
				segctl = read_gc0_segctl0() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 7: /* CFG0 (512MB) */
				segctl = read_gc0_segctl0();
				mask = (unsigned long)0xfe0000000ull;
				break;
			default:
				/*
				 * GCC 4.9 isn't smart enough to figure out that
				 * segctl and mask are always initialised.
				 */
				unreachable();
			}

			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
					     segctl & 0x0008))
				goto tlb_mapped;

			/* Unmapped, find guest physical address */
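			/* CFG.PA (bits 15:9 of segctl) << 20 lands on PA bits 35:29 */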
			pa = (segctl << 20) & mask;
			pa |= gva32 & ~mask;
			*gpa = pa;
			return 0;
		} else if ((s32)gva32 < (s32)0xc0000000) {
			/* legacy unmapped KSeg0 or KSeg1 */
			*gpa = gva32 & 0x1fffffff;
			return 0;
		}
#ifdef CONFIG_64BIT
	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
		/* XKPHYS */
		if (cpu_guest_has_segments) {
			/*
			 * Each of the 8 regions can be overridden by SegCtl2.XR
			 * to use SegCtl1.XAM.
			 */
			segctl = read_gc0_segctl2();
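			/* XR occupies SegCtl2 bits 63:56, one override bit per region */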
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 				segctl = read_gc0_segctl1();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 						     0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 					goto tlb_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		 * Traditionally fully unmapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		 * Bits 61:59 specify the CCA, which we can just mask off here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		 * Bits 58:PABITS should be zero, but we shouldn't have got here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		 * if it wasn't.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		*gpa = gva & 0x07ffffffffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) tlb_mapped:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
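/*
 * Worked example for the unmapped cases above (an illustrative sketch,
 * not part of the original source): a legacy KSeg0/KSeg1 GVA recovers
 * its GPA by dropping the top three address bits, so 0x80001000 (KSeg0,
 * cached) and 0xa0001000 (KSeg1, uncached) both resolve to GPA
 * 0x00001000:
 *
 *	u32 gva32 = 0x80001000;
 *	unsigned long gpa = gva32 & 0x1fffffff;	// == 0x00001000
 *
 * The EVA segmentation case works the same way, except that the physical
 * base and mask come from the matching SegCtl PA field rather than a
 * fixed constant.
 */
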
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785)  * @vcpu:	KVM VCPU state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786)  * @badvaddr:	Root BadVAddr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787)  * @gpa:	Output guest physical address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789)  * VZ implementations are permitted to report guest virtual addresses (GVA) in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790)  * BadVAddr on a root exception during guest execution, instead of the more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791)  * convenient guest physical addresses (GPA). When we get a GVA, this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792)  * converts it to a GPA, taking into account guest segmentation and guest TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793)  * state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795)  * Returns:	0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796)  *		-errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 				  unsigned long *gpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	/* If BadVAddr is GPA, then all is well in the world */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		*gpa = badvaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	/* Otherwise we'd expect it to be GVA ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		 "Unexpected gexccode %#x\n", gexccode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	/* ... and we need to perform the GVA->GPA translation in software */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
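/*
 * Usage sketch (illustrative only, not from the original source): a root
 * exception handler holding a guest BadVAddr can fetch the GPA without
 * caring which form the hardware reported:
 *
 *	unsigned long gpa;
 *
 *	if (kvm_vz_badvaddr_to_gpa(vcpu, vcpu->arch.host_cp0_badvaddr,
 *				   &gpa))
 *		return RESUME_HOST;	// unexpected GExcCode or no mapping
 *	// ... handle the fault at gpa ...
 */
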
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	u32 *opc = (u32 *) vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	u32 cause = vcpu->arch.host_cp0_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	u32 inst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	 *  Fetch the instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		opc += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	kvm_get_badinstr(opc, vcpu, &inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		exccode, opc, inst, badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		read_gc0_status());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	kvm_arch_vcpu_dump_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	return RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
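/*
 * Note on the CAUSEF_BD adjustment above (an illustrative sketch of a
 * pattern repeated by the handlers below): when an exception is taken in
 * a branch delay slot, EPC (and hence vcpu->arch.pc) points at the
 * branch itself, so the faulting instruction lives one word later:
 *
 *	u32 *badinst = opc;
 *	if (cause & CAUSEF_BD)
 *		badinst += 1;	// delay slot: branch PC + 4
 */
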
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) static unsigned long mips_process_maar(unsigned int op, unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	/* Mask off unused bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	if (read_gc0_pagegrain() & PG_ELPA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		mask |= 0x00ffffff00000000ull;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	if (cpu_guest_has_mvh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		mask |= MIPS_MAAR_VH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	/* Set or clear VH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	if (op == mtc_op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		/* clear VH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		val &= ~MIPS_MAAR_VH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	} else if (op == dmtc_op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		/* set VH to match VL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		val &= ~MIPS_MAAR_VH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		if (val & MIPS_MAAR_VL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 			val |= MIPS_MAAR_VH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	return val & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
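/*
 * Worked example (illustrative only, not from the original source): a
 * 64-bit MAAR write (dmtc) has VH mirrored from VL, while a 32-bit write
 * (mtc) always clears VH, since a 32-bit guest cannot express bit 63:
 *
 *	val = mips_process_maar(dmtc_op, MIPS_MAAR_VL | 0x2000);
 *	// -> MIPS_MAAR_VH | MIPS_MAAR_VL | 0x2000, when the guest has MVH
 *	val = mips_process_maar(mtc_op, MIPS_MAAR_VL | 0x2000);
 *	// -> MIPS_MAAR_VL | 0x2000, VH cleared
 */
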
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	val &= MIPS_MAARI_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	if (val == MIPS_MAARI_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	else if (val < ARRAY_SIZE(vcpu->arch.maar))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		kvm_write_sw_gc0_maari(cop0, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
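/*
 * Behaviour sketch (illustrative only): an all-ones index write selects
 * the last implemented MAAR pair, in-range values are stored directly,
 * and out-of-range values leave the software MAARI untouched:
 *
 *	kvm_write_maari(vcpu, MIPS_MAARI_INDEX);	// clamp to last entry
 *	kvm_write_maari(vcpu, 3);	// stored if 3 < ARRAY_SIZE(maar)
 *	kvm_write_maari(vcpu, 1000);	// ignored when out of range
 */
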
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 					      u32 *opc, u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 					      struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	u32 rt, rd, sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	unsigned long curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	 * Update PC and hold onto current PC in case there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	 * an error and we want to roll back the PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	curr_pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	er = update_pc(vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (inst.co_format.co) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		switch (inst.co_format.func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		case wait_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			er = kvm_mips_emul_wait(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		rt = inst.c0r_format.rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		rd = inst.c0r_format.rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		sel = inst.c0r_format.sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		switch (inst.c0r_format.rs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		case dmfc_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		case mfc_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			cop0->stat[rd][sel]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			if (rd == MIPS_CP0_COUNT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			    sel == 0) {			/* Count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 				val = kvm_mips_read_count(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			} else if (rd == MIPS_CP0_COMPARE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 				   sel == 0) {		/* Compare */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 				val = read_gc0_compare();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			} else if (rd == MIPS_CP0_LLADDR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 				   sel == 0) {		/* LLAddr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 				if (cpu_guest_has_rw_llb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 					val = read_gc0_lladdr() &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 						MIPS_LLADDR_LLB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 					val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			} else if (rd == MIPS_CP0_LLADDR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 				   sel == 1 &&		/* MAAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 				   cpu_guest_has_maar &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 				   !cpu_guest_has_dyn_maar) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 				/* MAARI must be in range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 						ARRAY_SIZE(vcpu->arch.maar));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 				val = vcpu->arch.maar[
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 					kvm_read_sw_gc0_maari(cop0)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			} else if ((rd == MIPS_CP0_PRID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 				    (sel == 0 ||	/* PRid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 				     sel == 2 ||	/* CDMMBase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 				     sel == 3)) ||	/* CMGCRBase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 				   (rd == MIPS_CP0_STATUS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 				    (sel == 2 ||	/* SRSCtl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 				     sel == 3)) ||	/* SRSMap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 				   (rd == MIPS_CP0_CONFIG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 				    (sel == 6 ||	/* Config6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 				     sel == 7)) ||	/* Config7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 				   (rd == MIPS_CP0_LLADDR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 				    (sel == 2) &&	/* MAARI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 				    cpu_guest_has_maar &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 				    !cpu_guest_has_dyn_maar) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 				   (rd == MIPS_CP0_ERRCTL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 				    (sel == 0))) {	/* ErrCtl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 				val = cop0->reg[rd][sel];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) #ifdef CONFIG_CPU_LOONGSON64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			} else if (rd == MIPS_CP0_DIAG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 				   (sel == 0)) {	/* Diag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 				val = cop0->reg[rd][sel];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 				val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			if (er != EMULATE_FAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 				/* Sign extend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 				if (inst.c0r_format.rs == mfc_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 					val = (int)val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 				vcpu->arch.gprs[rt] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 				      KVM_TRACE_COP0(rd, sel), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		case dmtc_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		case mtc_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			cop0->stat[rd][sel]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			val = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 				      KVM_TRACE_COP0(rd, sel), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			if (rd == MIPS_CP0_COUNT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			    sel == 0) {			/* Count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 				kvm_vz_lose_htimer(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			} else if (rd == MIPS_CP0_COMPARE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 				   sel == 0) {		/* Compare */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 				kvm_mips_write_compare(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 						       vcpu->arch.gprs[rt],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 						       true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			} else if (rd == MIPS_CP0_LLADDR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 				   sel == 0) {		/* LLAddr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 				 * P5600 generates GPSI on guest MTC0 LLAddr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 				 * Only allow the guest to clear LLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 				if (cpu_guest_has_rw_llb &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 				    !(val & MIPS_LLADDR_LLB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 					write_gc0_lladdr(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			} else if (rd == MIPS_CP0_LLADDR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 				   sel == 1 &&		/* MAAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 				   cpu_guest_has_maar &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 				   !cpu_guest_has_dyn_maar) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 				val = mips_process_maar(inst.c0r_format.rs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 							val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 				/* MAARI must be in range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 						ARRAY_SIZE(vcpu->arch.maar));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 				vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 									val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			} else if (rd == MIPS_CP0_LLADDR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 				   (sel == 2) &&	/* MAARI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 				   cpu_guest_has_maar &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				   !cpu_guest_has_dyn_maar) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 				kvm_write_maari(vcpu, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			} else if (rd == MIPS_CP0_CONFIG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 				   (sel == 6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 				cop0->reg[rd][sel] = (int)val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			} else if (rd == MIPS_CP0_ERRCTL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 				   (sel == 0)) {	/* ErrCtl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 				/* ignore the written value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) #ifdef CONFIG_CPU_LOONGSON64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			} else if (rd == MIPS_CP0_DIAG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 				   (sel == 0)) {	/* Diag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 				unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 				local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 				if (val & LOONGSON_DIAG_BTB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 					/* Flush BTB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 					set_c0_diag(LOONGSON_DIAG_BTB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 				if (val & LOONGSON_DIAG_ITLB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 					/* Flush ITLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 					set_c0_diag(LOONGSON_DIAG_ITLB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 				if (val & LOONGSON_DIAG_DTLB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 					/* Flush DTLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 					set_c0_diag(LOONGSON_DIAG_DTLB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 				if (val & LOONGSON_DIAG_VTLB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 					/* Flush VTLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 					kvm_loongson_clear_guest_vtlb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 				if (val & LOONGSON_DIAG_FTLB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 					/* Flush FTLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 					kvm_loongson_clear_guest_ftlb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 				local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 				er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	/* Roll back the PC only if emulation was unsuccessful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	if (er == EMULATE_FAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			curr_pc, __func__, inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		vcpu->arch.pc = curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
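/*
 * The PC handling above (an illustrative sketch of a pattern shared by
 * the GPSI emulation paths in this file): the PC is advanced first so
 * that EMULATE_DONE resumes after the trapping instruction, and is
 * restored only when emulation fails:
 *
 *	curr_pc = vcpu->arch.pc;
 *	er = update_pc(vcpu, cause);	// accounts for branch delay slots
 *	// ... emulate ...
 *	if (er == EMULATE_FAIL)
 *		vcpu->arch.pc = curr_pc;	// roll back
 */
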
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 					       u32 *opc, u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 					       struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	u32 cache, op_inst, op, base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	s16 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	unsigned long va, curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	 * Update PC and hold onto current PC in case there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	 * an error and we want to roll back the PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	curr_pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	er = update_pc(vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	base = inst.i_format.rs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	op_inst = inst.i_format.rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	if (cpu_has_mips_r6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		offset = inst.spec3_format.simmediate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		offset = inst.i_format.simmediate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	cache = op_inst & CacheOp_Cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	op = op_inst & CacheOp_Op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	va = arch->gprs[base] + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		  cache, op, base, arch->gprs[base], offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	/* Secondary or tertiary cache ops ignored */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	if (cache != Cache_I && cache != Cache_D)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	switch (op_inst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	case Index_Invalidate_I:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		flush_icache_line_indexed(va);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	case Index_Writeback_Inv_D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		flush_dcache_line_indexed(va);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	case Hit_Invalidate_I:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	case Hit_Invalidate_D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	case Hit_Writeback_Inv_D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			/* We can just flush the entire icache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 			local_flush_icache_range(0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		/* Other platforms support guest hit cache ops natively, so fail below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	/* Roll back PC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	vcpu->arch.pc = curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
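/*
 * Decoding example (illustrative; constants as in asm/cacheops.h): the
 * 5-bit rt field of a CACHE instruction packs the target cache in its
 * low two bits and the operation in the remaining bits, e.g. for
 * Hit_Writeback_Inv_D:
 *
 *	op_inst = Hit_Writeback_Inv_D;
 *	cache = op_inst & CacheOp_Cache;	// == Cache_D
 *	op = op_inst & CacheOp_Op;		// == Hit_Writeback_Inv
 */
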
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) #ifdef CONFIG_CPU_LOONGSON64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 					      u32 *opc, u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 					      struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	unsigned int rs, rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	unsigned int hostcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	unsigned long curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	 * Update PC and hold onto current PC in case there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	 * an error and we want to roll back the PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	curr_pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	er = update_pc(vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	rs = inst.loongson3_lscsr_format.rs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	rd = inst.loongson3_lscsr_format.rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	switch (inst.loongson3_lscsr_format.fr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	case 0x8:  /* Read CPUCFG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		++vcpu->stat.vz_cpucfg_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		switch (vcpu->arch.gprs[rs]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		case LOONGSON_CFG0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			vcpu->arch.gprs[rd] = 0x14c000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		case LOONGSON_CFG1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 				    LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 				    LOONGSON_CFG1_SFBP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 			vcpu->arch.gprs[rd] = hostcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		case LOONGSON_CFG2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 			hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 				    LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			vcpu->arch.gprs[rd] = hostcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		case LOONGSON_CFG3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			vcpu->arch.gprs[rd] = hostcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			/* Don't export any other advanced features to guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			vcpu->arch.gprs[rd] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 			inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	/* Roll back the PC only if emulation was unsuccessful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (er == EMULATE_FAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		vcpu->arch.pc = curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
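/*
 * Note on the CPUCFG emulation above (an illustrative summary): rather
 * than reflecting the host CPUCFG verbatim, the handler whitelists a
 * fixed set of feature bits and returns zero for any unknown select, so
 * optional host features are never advertised to the guest by accident,
 * e.g. for CPUCFG1:
 *
 *	hostcfg = read_cpucfg(LOONGSON_CFG1);
 *	guestcfg = hostcfg & (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
 *			      LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
 *			      LOONGSON_CFG1_SFBP);
 */
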
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 						     struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	union mips_instruction inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	int rd, rt, sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	 *  Fetch the instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		opc += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	switch (inst.r_format.opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	case cop0_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) #ifndef CONFIG_CPU_MIPSR6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	case cache_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) #ifdef CONFIG_CPU_LOONGSON64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	case lwc2_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	case spec3_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		switch (inst.spec3_format.func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) #ifdef CONFIG_CPU_MIPSR6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		case cache6_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		case rdhwr_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			if (inst.r_format.rs || (inst.r_format.re >> 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 				goto unknown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 			rd = inst.r_format.rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			rt = inst.r_format.rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			sel = inst.r_format.re & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 			switch (rd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 			case MIPS_HWR_CC:	/* Read count register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 				arch->gprs[rt] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 					(long)(int)kvm_mips_read_count(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 					      KVM_TRACE_HWR(rd, sel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 				goto unknown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			er = update_pc(vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 			goto unknown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) unknown:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		kvm_err("GPSI exception not supported (%p/%#x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 				opc, inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		kvm_arch_vcpu_dump_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 						     struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	union mips_instruction inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	 *  Fetch the instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		opc += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	/* complete MTC0 on behalf of guest and advance EPC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	if (inst.c0r_format.opcode == cop0_op &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	    inst.c0r_format.rs == mtc_op &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	    inst.c0r_format.z == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		int rt = inst.c0r_format.rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		int rd = inst.c0r_format.rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		int sel = inst.c0r_format.sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		unsigned int val = arch->gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		unsigned int old_val, change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			      val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 			/* FR bit should read as zero if no FPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 				val &= ~(ST0_CU1 | ST0_FR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			 * Also don't allow FR to be set if host doesn't support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			 * it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 				val &= ~ST0_FR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			old_val = read_gc0_status();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			change = val ^ old_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 			if (change & ST0_FR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 				 * FPU and Vector register state is made
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 				 * UNPREDICTABLE by a change of FR, so don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 				 * even bother saving it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 				kvm_drop_fpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 			 * If MSA state is already live, it is undefined how it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 			 * interacts with FR=0 FPU state, and we don't want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 			 * hit reserved instruction exceptions trying to save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 			 * the MSA state later when CU=1 && FR=1, so play it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 			 * safe and save it first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			if (change & ST0_CU1 && !(val & ST0_FR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 				kvm_lose_fpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 			write_gc0_status(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			u32 old_cause = read_gc0_cause();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 			u32 change = old_cause ^ val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			/* DC bit enabling/disabling timer? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			if (change & CAUSEF_DC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 				if (val & CAUSEF_DC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 					kvm_vz_lose_htimer(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 					kvm_mips_count_disable_cause(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 					kvm_mips_count_enable_cause(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 			/* Only certain bits are RW to the guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 				   CAUSEF_IP0 | CAUSEF_IP1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			/* WP can only be cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			change &= ~CAUSEF_WP | old_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 			write_gc0_cause(old_cause ^ change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			write_gc0_intctl(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 			old_val = read_gc0_config5();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 			change = val ^ old_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 			/* Handle changes in FPU/MSA modes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 			preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 			 * Propagate FRE changes immediately if the FPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 			 * context is already loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 			if (change & MIPS_CONF5_FRE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 				change_c0_config5(MIPS_CONF5_FRE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			val = old_val ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 				(change & kvm_vz_config5_guest_wrmask(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 			write_gc0_config5(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			    opc, inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 			er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		if (er != EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 			er = update_pc(vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			opc, inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
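/*
 * The Cause.WP handling above uses a small bit trick (illustrative
 * sketch): a field that may only be cleared, never set, keeps its bit in
 * 'change' only when the old value already had it:
 *
 *	change &= ~CAUSEF_WP | old_cause;
 *	// old WP=1, new WP=0: change keeps WP, the XOR below clears it
 *	// old WP=0, new WP=1: change loses WP, so WP stays 0
 *	write_gc0_cause(old_cause ^ change);
 */
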
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 						     struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	 * Presumably this is due to MC (guest mode change), so let's trace some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	 * relevant info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	trace_kvm_guest_mode_change(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 						   struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	enum emulation_result er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	union mips_instruction inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	unsigned long curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		opc += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	 * Update PC and hold onto current PC in case there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	 * an error and we want to roll back the PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	curr_pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	er = update_pc(vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	er = kvm_mips_emul_hypcall(vcpu, inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		vcpu->arch.pc = curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 							u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 							u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 							struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	u32 inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	 *  Fetch the instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		opc += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	kvm_get_badinstr(opc, vcpu, &inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x  Status: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		gexccode, opc, inst, read_gc0_status());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	u32 *opc = (u32 *) vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	u32 cause = vcpu->arch.host_cp0_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	int ret = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	switch (gexccode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	case MIPS_GCTL0_GEXC_GPSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		++vcpu->stat.vz_gpsi_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	case MIPS_GCTL0_GEXC_GSFC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		++vcpu->stat.vz_gsfc_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	case MIPS_GCTL0_GEXC_HC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		++vcpu->stat.vz_hc_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	case MIPS_GCTL0_GEXC_GRR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		++vcpu->stat.vz_grr_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 						       vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	case MIPS_GCTL0_GEXC_GVA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		++vcpu->stat.vz_gva_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 						       vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	case MIPS_GCTL0_GEXC_GHFC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		++vcpu->stat.vz_ghfc_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	case MIPS_GCTL0_GEXC_GPA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		++vcpu->stat.vz_gpa_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 						       vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		++vcpu->stat.vz_resvd_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 						       vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	if (er == EMULATE_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		ret = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	} else if (er == EMULATE_HYPERCALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		ret = kvm_mips_handle_hypcall(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		ret = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)  * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  * @vcpu:	Virtual CPU context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)  * Handle when the guest attempts to use a coprocessor which hasn't been allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)  * by the root context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	u32 cause = vcpu->arch.host_cp0_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	enum emulation_result er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	int ret = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		 * If guest FPU not present, the FPU operation should have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		 * treated as a reserved instruction!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		 * If FPU already in use, we shouldn't get this at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 			preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 			return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		kvm_own_fpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	/* other coprocessors not handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	switch (er) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	case EMULATE_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		ret = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	case EMULATE_FAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		ret = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
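/*
 * Illustrative note (not part of the original file): the coprocessor
 * number tested above is the Cause.CE field. With the CAUSEF_CE mask and
 * CAUSEB_CE shift from <asm/mipsregs.h>:
 *
 *	unsigned int ce = (cause & CAUSEF_CE) >> CAUSEB_CE;
 *
 * ce == 1 selects CP1 (the FPU), the only coprocessor handled here; any
 * other value leaves er == EMULATE_FAIL and the exit is reported to
 * userspace as an internal error.
 */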
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)  * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)  * @vcpu:	Virtual CPU context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)  * Handle when the guest attempts to use MSA when it is disabled in the root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)  * context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	 * should have been treated as a reserved instruction!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	 * Same if CU1=1, FR=0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	 * If MSA already in use, we shouldn't get this at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		return RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	kvm_own_msa(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	return RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
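/*
 * Illustrative note (not part of the original file): the Status test above
 * rejects the case CU1 = 1 with FR = 0, because
 * (status & (ST0_CU1 | ST0_FR)) == ST0_CU1 holds exactly when CU1 is set
 * and FR is clear. MSA requires the 64-bit FPU register model (FR = 1), so
 * an MSA Disabled exception in that state indicates an inconsistency and
 * is reported as an internal error rather than handled by enabling MSA.
 */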
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	u32 *opc = (u32 *) vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	u32 cause = vcpu->arch.host_cp0_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	union mips_instruction inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	int err, ret = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		/* A code fetch fault doesn't count as an MMIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		if (kvm_is_ifetch_fault(&vcpu->arch)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 			return RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		/* Fetch the instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 			opc += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		err = kvm_get_badinstr(opc, vcpu, &inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			return RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		/* Treat as MMIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		er = kvm_mips_emulate_load(inst, cause, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		if (er == EMULATE_FAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 				opc, badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	if (er == EMULATE_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		ret = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	} else if (er == EMULATE_DO_MMIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		run->exit_reason = KVM_EXIT_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		ret = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		ret = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	u32 *opc = (u32 *) vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	u32 cause = vcpu->arch.host_cp0_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	union mips_instruction inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	int ret = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	/* Just try the access again if we couldn't do the translation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		return RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	vcpu->arch.host_cp0_badvaddr = badvaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		/* Fetch the instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			opc += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		err = kvm_get_badinstr(opc, vcpu, &inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			return RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		/* Treat as MMIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		er = kvm_mips_emulate_store(inst, cause, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		if (er == EMULATE_FAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 				opc, badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	if (er == EMULATE_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		ret = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	} else if (er == EMULATE_DO_MMIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		run->exit_reason = KVM_EXIT_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		ret = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		ret = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
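/*
 * Illustrative userspace sketch (not part of this file): when either TLB
 * miss handler above exits to userspace with KVM_EXIT_MMIO, the VMM
 * services the access through the shared kvm_run area and re-enters the
 * vCPU. A minimal loop, where device_read()/device_write() stand in for a
 * hypothetical device model:
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	void handle_mmio_exit(int vcpu_fd, struct kvm_run *run)
 *	{
 *		if (run->mmio.is_write)
 *			device_write(run->mmio.phys_addr, run->mmio.data,
 *				     run->mmio.len);
 *		else
 *			device_read(run->mmio.phys_addr, run->mmio.data,
 *				    run->mmio.len);
 *		ioctl(vcpu_fd, KVM_RUN, 0);	// complete the access
 *	}
 */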
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) static u64 kvm_vz_get_one_regs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	KVM_REG_MIPS_CP0_INDEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	KVM_REG_MIPS_CP0_ENTRYLO0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	KVM_REG_MIPS_CP0_ENTRYLO1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	KVM_REG_MIPS_CP0_CONTEXT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	KVM_REG_MIPS_CP0_PAGEMASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	KVM_REG_MIPS_CP0_PAGEGRAIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	KVM_REG_MIPS_CP0_WIRED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	KVM_REG_MIPS_CP0_HWRENA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	KVM_REG_MIPS_CP0_BADVADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	KVM_REG_MIPS_CP0_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	KVM_REG_MIPS_CP0_ENTRYHI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	KVM_REG_MIPS_CP0_COMPARE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	KVM_REG_MIPS_CP0_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	KVM_REG_MIPS_CP0_INTCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	KVM_REG_MIPS_CP0_CAUSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	KVM_REG_MIPS_CP0_EPC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	KVM_REG_MIPS_CP0_PRID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	KVM_REG_MIPS_CP0_EBASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	KVM_REG_MIPS_CP0_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	KVM_REG_MIPS_CP0_CONFIG1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	KVM_REG_MIPS_CP0_CONFIG2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	KVM_REG_MIPS_CP0_CONFIG3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	KVM_REG_MIPS_CP0_CONFIG4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	KVM_REG_MIPS_CP0_CONFIG5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	KVM_REG_MIPS_CP0_CONFIG6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	KVM_REG_MIPS_CP0_XCONTEXT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	KVM_REG_MIPS_CP0_ERROREPC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	KVM_REG_MIPS_COUNT_CTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	KVM_REG_MIPS_COUNT_RESUME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	KVM_REG_MIPS_COUNT_HZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) static u64 kvm_vz_get_one_regs_contextconfig[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	KVM_REG_MIPS_CP0_CONTEXTCONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) static u64 kvm_vz_get_one_regs_segments[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	KVM_REG_MIPS_CP0_SEGCTL0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	KVM_REG_MIPS_CP0_SEGCTL1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	KVM_REG_MIPS_CP0_SEGCTL2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) static u64 kvm_vz_get_one_regs_htw[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	KVM_REG_MIPS_CP0_PWBASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	KVM_REG_MIPS_CP0_PWFIELD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	KVM_REG_MIPS_CP0_PWSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	KVM_REG_MIPS_CP0_PWCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) static u64 kvm_vz_get_one_regs_kscratch[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	KVM_REG_MIPS_CP0_KSCRATCH1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	KVM_REG_MIPS_CP0_KSCRATCH2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	KVM_REG_MIPS_CP0_KSCRATCH3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	KVM_REG_MIPS_CP0_KSCRATCH4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	KVM_REG_MIPS_CP0_KSCRATCH5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	KVM_REG_MIPS_CP0_KSCRATCH6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	unsigned long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	if (cpu_guest_has_userlocal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		++ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	if (cpu_guest_has_badinstr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		++ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	if (cpu_guest_has_badinstrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		++ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	if (cpu_guest_has_contextconfig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	if (cpu_guest_has_segments)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	if (cpu_guest_has_htw || cpu_guest_has_ldpte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
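/*
 * Worked example (not part of the original file): guest.kscratch_mask has
 * one bit per implemented KScratch register, mirroring Config4.KScrExist.
 * A mask of 0xfc (bits 2..7 set, i.e. KScratch1..KScratch6 at CP0 selects
 * 2..7) makes __arch_hweight8() add 6 to the register count here.
 */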
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	u64 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	if (copy_to_user(indices, kvm_vz_get_one_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 			 sizeof(kvm_vz_get_one_regs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	indices += ARRAY_SIZE(kvm_vz_get_one_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	if (cpu_guest_has_userlocal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		index = KVM_REG_MIPS_CP0_USERLOCAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		if (copy_to_user(indices, &index, sizeof(index)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		++indices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	if (cpu_guest_has_badinstr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		index = KVM_REG_MIPS_CP0_BADINSTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		if (copy_to_user(indices, &index, sizeof(index)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		++indices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	if (cpu_guest_has_badinstrp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		index = KVM_REG_MIPS_CP0_BADINSTRP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		if (copy_to_user(indices, &index, sizeof(index)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		++indices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	if (cpu_guest_has_contextconfig) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 				 sizeof(kvm_vz_get_one_regs_contextconfig)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	if (cpu_guest_has_segments) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 				 sizeof(kvm_vz_get_one_regs_segments)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 				 sizeof(kvm_vz_get_one_regs_htw)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			index = KVM_REG_MIPS_CP0_MAAR(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			if (copy_to_user(indices, &index, sizeof(index)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 				return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			++indices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		index = KVM_REG_MIPS_CP0_MAARI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		if (copy_to_user(indices, &index, sizeof(index)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		++indices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	for (i = 0; i < 6; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		if (!cpu_guest_has_kscr(i + 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		++indices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
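/*
 * Illustrative userspace sketch (not part of this file): the two functions
 * above back the KVM_GET_REG_LIST vCPU ioctl. Userspace typically probes
 * for the count first (the ioctl fails with E2BIG but updates n), then
 * fetches the indices:
 *
 *	#include <linux/kvm.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *
 *	struct kvm_reg_list *vz_get_reg_list(int vcpu_fd)
 *	{
 *		struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *		ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
 *		list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *		if (!list)
 *			return NULL;
 *		list->n = probe.n;
 *		if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
 *			free(list);
 *			return NULL;
 *		}
 *		return list;
 *	}
 */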
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) static inline s64 entrylo_kvm_to_user(unsigned long v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	s64 mask, ret = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	if (BITS_PER_LONG == 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		 * KVM API exposes 64-bit version of the register, so move the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		 * RI/XI bits up into place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		ret &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		ret |= ((s64)v & mask) << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) static inline unsigned long entrylo_user_to_kvm(s64 v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	unsigned long mask, ret = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	if (BITS_PER_LONG == 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		 * KVM API exposes 64-bit version of the register, so move the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		 * RI/XI bits down into place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		ret &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		ret |= (v >> 32) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
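/*
 * Worked example (not part of the original file): on a 32-bit host, RI and
 * XI sit at bits 31 and 30 of EntryLo. A kernel value of 0x80000007 (RI
 * set, low PFN/flag bits 0x7) becomes 0x8000000000000007 in the 64-bit
 * user view: RI moves from bit 31 up to bit 63 while the low bits stay
 * put. entrylo_user_to_kvm() applies the inverse shift; on 64-bit hosts
 * both helpers are identity functions.
 */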
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 			      const struct kvm_one_reg *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 			      s64 *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	switch (reg->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	case KVM_REG_MIPS_CP0_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		*v = (long)read_gc0_index();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	case KVM_REG_MIPS_CP0_ENTRYLO0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	case KVM_REG_MIPS_CP0_ENTRYLO1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	case KVM_REG_MIPS_CP0_CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		*v = (long)read_gc0_context();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		if (!cpu_guest_has_contextconfig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		*v = read_gc0_contextconfig();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	case KVM_REG_MIPS_CP0_USERLOCAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		if (!cpu_guest_has_userlocal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		*v = read_gc0_userlocal();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		if (!cpu_guest_has_contextconfig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		*v = read_gc0_xcontextconfig();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	case KVM_REG_MIPS_CP0_PAGEMASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		*v = (long)read_gc0_pagemask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	case KVM_REG_MIPS_CP0_PAGEGRAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 		*v = (long)read_gc0_pagegrain();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	case KVM_REG_MIPS_CP0_SEGCTL0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		if (!cpu_guest_has_segments)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		*v = read_gc0_segctl0();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	case KVM_REG_MIPS_CP0_SEGCTL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		if (!cpu_guest_has_segments)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		*v = read_gc0_segctl1();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	case KVM_REG_MIPS_CP0_SEGCTL2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		if (!cpu_guest_has_segments)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		*v = read_gc0_segctl2();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	case KVM_REG_MIPS_CP0_PWBASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		*v = read_gc0_pwbase();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	case KVM_REG_MIPS_CP0_PWFIELD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		*v = read_gc0_pwfield();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	case KVM_REG_MIPS_CP0_PWSIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		*v = read_gc0_pwsize();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	case KVM_REG_MIPS_CP0_WIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		*v = (long)read_gc0_wired();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	case KVM_REG_MIPS_CP0_PWCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		*v = read_gc0_pwctl();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	case KVM_REG_MIPS_CP0_HWRENA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		*v = (long)read_gc0_hwrena();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	case KVM_REG_MIPS_CP0_BADVADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		*v = (long)read_gc0_badvaddr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	case KVM_REG_MIPS_CP0_BADINSTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		if (!cpu_guest_has_badinstr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		*v = read_gc0_badinstr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	case KVM_REG_MIPS_CP0_BADINSTRP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		if (!cpu_guest_has_badinstrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		*v = read_gc0_badinstrp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	case KVM_REG_MIPS_CP0_COUNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		*v = kvm_mips_read_count(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	case KVM_REG_MIPS_CP0_ENTRYHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		*v = (long)read_gc0_entryhi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	case KVM_REG_MIPS_CP0_COMPARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		*v = (long)read_gc0_compare();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	case KVM_REG_MIPS_CP0_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		*v = (long)read_gc0_status();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	case KVM_REG_MIPS_CP0_INTCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		*v = read_gc0_intctl();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	case KVM_REG_MIPS_CP0_CAUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		*v = (long)read_gc0_cause();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	case KVM_REG_MIPS_CP0_EPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 		*v = (long)read_gc0_epc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	case KVM_REG_MIPS_CP0_PRID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		switch (boot_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		case CPU_CAVIUM_OCTEON3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 			/* Octeon III has a read-only guest.PRid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 			*v = read_gc0_prid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 			*v = (long)kvm_read_c0_guest_prid(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	case KVM_REG_MIPS_CP0_EBASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		*v = kvm_vz_read_gc0_ebase();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	case KVM_REG_MIPS_CP0_CONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		*v = read_gc0_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	case KVM_REG_MIPS_CP0_CONFIG1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		if (!cpu_guest_has_conf1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		*v = read_gc0_config1();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	case KVM_REG_MIPS_CP0_CONFIG2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		if (!cpu_guest_has_conf2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		*v = read_gc0_config2();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	case KVM_REG_MIPS_CP0_CONFIG3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		if (!cpu_guest_has_conf3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		*v = read_gc0_config3();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	case KVM_REG_MIPS_CP0_CONFIG4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		if (!cpu_guest_has_conf4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		*v = read_gc0_config4();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	case KVM_REG_MIPS_CP0_CONFIG5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		if (!cpu_guest_has_conf5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		*v = read_gc0_config5();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	case KVM_REG_MIPS_CP0_CONFIG6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		*v = kvm_read_sw_gc0_config6(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		*v = vcpu->arch.maar[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	case KVM_REG_MIPS_CP0_MAARI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	case KVM_REG_MIPS_CP0_XCONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		*v = read_gc0_xcontext();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	case KVM_REG_MIPS_CP0_ERROREPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 		*v = (long)read_gc0_errorepc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		if (!cpu_guest_has_kscr(idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		switch (idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 			*v = (long)read_gc0_kscratch1();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 			*v = (long)read_gc0_kscratch2();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 			*v = (long)read_gc0_kscratch3();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 			*v = (long)read_gc0_kscratch4();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 			*v = (long)read_gc0_kscratch5();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 			*v = (long)read_gc0_kscratch6();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	case KVM_REG_MIPS_COUNT_CTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		*v = vcpu->arch.count_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	case KVM_REG_MIPS_COUNT_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		*v = ktime_to_ns(vcpu->arch.count_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	case KVM_REG_MIPS_COUNT_HZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		*v = vcpu->arch.count_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) }
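/*
 * Illustrative userspace sketch (not part of this file): the accessor
 * above backs the KVM_GET_ONE_REG vCPU ioctl. Reading the 32-bit guest
 * CP0_Count, assuming the KVM_REG_MIPS_CP0_COUNT id from the MIPS KVM
 * headers:
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	int read_guest_count(int vcpu_fd, __u32 *count)
 *	{
 *		struct kvm_one_reg reg = {
 *			.id   = KVM_REG_MIPS_CP0_COUNT,
 *			.addr = (__u64)(unsigned long)count,
 *		};
 *
 *		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *	}
 */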
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 			      const struct kvm_one_reg *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 			      s64 v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	unsigned int cur, change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	switch (reg->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	case KVM_REG_MIPS_CP0_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		write_gc0_index(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	case KVM_REG_MIPS_CP0_ENTRYLO0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		write_gc0_entrylo0(entrylo_user_to_kvm(v));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	case KVM_REG_MIPS_CP0_ENTRYLO1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		write_gc0_entrylo1(entrylo_user_to_kvm(v));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	case KVM_REG_MIPS_CP0_CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		write_gc0_context(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 		if (!cpu_guest_has_contextconfig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		write_gc0_contextconfig(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	case KVM_REG_MIPS_CP0_USERLOCAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		if (!cpu_guest_has_userlocal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		write_gc0_userlocal(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 		if (!cpu_guest_has_contextconfig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		write_gc0_xcontextconfig(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	case KVM_REG_MIPS_CP0_PAGEMASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		write_gc0_pagemask(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	case KVM_REG_MIPS_CP0_PAGEGRAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		write_gc0_pagegrain(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	case KVM_REG_MIPS_CP0_SEGCTL0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		if (!cpu_guest_has_segments)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		write_gc0_segctl0(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	case KVM_REG_MIPS_CP0_SEGCTL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 		if (!cpu_guest_has_segments)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 		write_gc0_segctl1(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	case KVM_REG_MIPS_CP0_SEGCTL2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		if (!cpu_guest_has_segments)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		write_gc0_segctl2(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	case KVM_REG_MIPS_CP0_PWBASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		write_gc0_pwbase(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	case KVM_REG_MIPS_CP0_PWFIELD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		write_gc0_pwfield(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	case KVM_REG_MIPS_CP0_PWSIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		write_gc0_pwsize(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	case KVM_REG_MIPS_CP0_WIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	case KVM_REG_MIPS_CP0_PWCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		write_gc0_pwctl(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	case KVM_REG_MIPS_CP0_HWRENA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		write_gc0_hwrena(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	case KVM_REG_MIPS_CP0_BADVADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		write_gc0_badvaddr(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	case KVM_REG_MIPS_CP0_BADINSTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		if (!cpu_guest_has_badinstr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		write_gc0_badinstr(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	case KVM_REG_MIPS_CP0_BADINSTRP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		if (!cpu_guest_has_badinstrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		write_gc0_badinstrp(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	case KVM_REG_MIPS_CP0_COUNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		kvm_mips_write_count(vcpu, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	case KVM_REG_MIPS_CP0_ENTRYHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		write_gc0_entryhi(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	case KVM_REG_MIPS_CP0_COMPARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		kvm_mips_write_compare(vcpu, v, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	case KVM_REG_MIPS_CP0_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		write_gc0_status(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	case KVM_REG_MIPS_CP0_INTCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		write_gc0_intctl(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	case KVM_REG_MIPS_CP0_CAUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		 * If the timer is stopped or started (DC bit) it must look
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		 * atomic with changes to the timer interrupt pending bit (TI).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		 * A timer interrupt should not happen in between.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 			if (v & CAUSEF_DC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 				/* disable timer first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 				kvm_mips_count_disable_cause(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 				change_gc0_cause((u32)~CAUSEF_DC, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 				/* enable timer last */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 				change_gc0_cause((u32)~CAUSEF_DC, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 				kvm_mips_count_enable_cause(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 			write_gc0_cause(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	case KVM_REG_MIPS_CP0_EPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		write_gc0_epc(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	case KVM_REG_MIPS_CP0_PRID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		switch (boot_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		case CPU_CAVIUM_OCTEON3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		/* Octeon III has a guest.PRid, but it's read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 			kvm_write_c0_guest_prid(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	case KVM_REG_MIPS_CP0_EBASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		kvm_vz_write_gc0_ebase(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	case KVM_REG_MIPS_CP0_CONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		cur = read_gc0_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		if (change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 			v = cur ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 			write_gc0_config(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	case KVM_REG_MIPS_CP0_CONFIG1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		if (!cpu_guest_has_conf1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		cur = read_gc0_config1();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		if (change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 			v = cur ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 			write_gc0_config1(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	case KVM_REG_MIPS_CP0_CONFIG2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		if (!cpu_guest_has_conf2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		cur = read_gc0_config2();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		if (change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 			v = cur ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 			write_gc0_config2(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	case KVM_REG_MIPS_CP0_CONFIG3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		if (!cpu_guest_has_conf3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		cur = read_gc0_config3();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 		if (change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 			v = cur ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 			write_gc0_config3(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	case KVM_REG_MIPS_CP0_CONFIG4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		if (!cpu_guest_has_conf4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		cur = read_gc0_config4();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		if (change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 			v = cur ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 			write_gc0_config4(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	case KVM_REG_MIPS_CP0_CONFIG5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		if (!cpu_guest_has_conf5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		cur = read_gc0_config5();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		if (change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 			v = cur ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 			write_gc0_config5(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	case KVM_REG_MIPS_CP0_CONFIG6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		cur = kvm_read_sw_gc0_config6(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		if (change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 			v = cur ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 			kvm_write_sw_gc0_config6(cop0, (int)v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	case KVM_REG_MIPS_CP0_MAARI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		kvm_write_maari(vcpu, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	case KVM_REG_MIPS_CP0_XCONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		write_gc0_xcontext(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	case KVM_REG_MIPS_CP0_ERROREPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		write_gc0_errorepc(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		if (!cpu_guest_has_kscr(idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		switch (idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 			write_gc0_kscratch1(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 		case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 			write_gc0_kscratch2(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 			write_gc0_kscratch3(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 		case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 			write_gc0_kscratch4(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 		case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 			write_gc0_kscratch5(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 		case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 			write_gc0_kscratch6(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	case KVM_REG_MIPS_COUNT_CTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		ret = kvm_mips_set_count_ctl(vcpu, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	case KVM_REG_MIPS_COUNT_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 		ret = kvm_mips_set_count_resume(vcpu, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	case KVM_REG_MIPS_COUNT_HZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 		ret = kvm_mips_set_count_hz(vcpu, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
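/*
 * The Config write pattern in the switch above ("v = cur ^ change") keeps
 * read-only bits intact: only bits that both differ from the current value
 * and are set in the user write mask get toggled. Worked example: with
 * cur == 0xf0, v == 0x0f and a write mask of 0x03, change == (0xf0 ^ 0x0f)
 * & 0x03 == 0x03, so the value written back is 0xf0 ^ 0x03 == 0xf3 (the
 * two maskable bits take the user's value, everything else keeps cur).
 */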
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) #define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
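/*
 * GuestIDs are allocated much like ASIDs: the low bits (GUESTID_MASK) hold
 * the ID programmed into GuestCtl1.ID, while the upper bits carry a
 * generation ("version") number that is bumped whenever the ID space
 * wraps, at which point all guest mappings are flushed. Worked example,
 * assuming an 8-bit ID field (GUESTID_MASK == 0xff, hence
 * GUESTID_FIRST_VERSION == 0x100; the real width is taken from
 * current_cpu_data.guestid_mask at hardware-enable time):
 *
 *	guestid = 0x1ff;	(last ID of version 1)
 *	++guestid;		(0x200: ID bits wrapped, TLBs flushed)
 *	++guestid;		(0x201: ID 0 is reserved for root)
 *
 * A vzguestid[] value whose version bits no longer match guestid_cache(cpu)
 * is stale and is reallocated by kvm_vz_vcpu_load_tlb().
 */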
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	unsigned long guestid = guestid_cache(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	if (!(++guestid & GUESTID_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		if (cpu_has_vtag_icache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 			flush_icache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 		if (!guestid)		/* fix version if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 			guestid = GUESTID_FIRST_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		++guestid;		/* guestid 0 reserved for root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		/* start new guestid cycle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		kvm_vz_local_flush_roottlb_all_guests();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		kvm_vz_local_flush_guesttlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	guestid_cache(cpu) = guestid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) /* Returns 1 if the guest TLB may be clobbered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	if (!kvm_request_pending(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		if (cpu_has_guestid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 			/* Drop all GuestIDs for this VCPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 			for_each_possible_cpu(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 				vcpu->arch.vzguestid[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 			/* This will clobber guest TLB contents too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 			ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		 * For Root ASID Dealias (RAD) we don't do anything here, but we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		 * still need the request to ensure we recheck asid_flush_mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		 * We can still return 0 as only the root TLB will be affected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		 * by a root ASID flush.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) }
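/*
 * KVM_REQ_TLB_FLUSH is raised against the VCPU when its GPA mappings may
 * have changed, e.g. via the MMU notifiers or a memslot update. Zapping
 * every per-CPU GuestID above forces a fresh allocation, and with it a
 * guest TLB flush, before the VCPU next enters guest context.
 */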
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	unsigned int wired = read_gc0_wired();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	struct kvm_mips_tlb *tlbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	/* Expand the wired TLB array if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	wired &= MIPSR6_WIRED_WIRED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	if (wired > vcpu->arch.wired_tlb_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 		if (WARN_ON(!tlbs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 			/* Save whatever we can */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 			wired = vcpu->arch.wired_tlb_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 			vcpu->arch.wired_tlb = tlbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 			vcpu->arch.wired_tlb_limit = wired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	if (wired)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 		/* Save wired entries from the guest TLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	/* Invalidate any dropped entries since last time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 		vcpu->arch.wired_tlb[i].tlb_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	vcpu->arch.wired_tlb_used = wired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	/* Load wired entries into the guest TLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	if (vcpu->arch.wired_tlb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 				     vcpu->arch.wired_tlb_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
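/*
 * kvm_vz_vcpu_save_wired()/kvm_vz_vcpu_load_wired() bracket host
 * execution: the snapshot taken when the VCPU is put (including the
 * invalidation of entries the guest un-wired) is replayed on the next
 * load, possibly on a different physical CPU whose guest TLB contents are
 * unrelated.
 */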
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	struct kvm *kvm = vcpu->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	bool migrated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	 * Are we entering guest context on a different CPU to last time?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	 * If so, the VCPU's guest TLB state on this CPU may be stale.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	migrated = (vcpu->arch.last_exec_cpu != cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	vcpu->arch.last_exec_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	 * remains set until another vcpu is loaded.  As a rule GuestRID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	 * remains zeroed when in root context unless the kernel is busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	 * manipulating guest TLB entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	if (cpu_has_guestid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 		 * Check if our GuestID is of an older version and thus invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 		 * We also discard the stored GuestID if we've executed on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		 * another CPU, as the guest mappings may have changed without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 		 * hypervisor knowledge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 		if (migrated ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 					GUESTID_VERSION_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 			kvm_vz_get_new_guestid(cpu, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 			trace_kvm_guestid_change(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 						 vcpu->arch.vzguestid[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 		/* Restore GuestID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 		 * The Guest TLB only stores a single guest's TLB state, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		 * flush it if another VCPU has executed on this CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 		 * We also flush if we've executed on another CPU, as the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 		 * mappings may have changed without hypervisor knowledge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		if (migrated || last_exec_vcpu[cpu] != vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 			kvm_vz_local_flush_guesttlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		last_exec_vcpu[cpu] = vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		 * Root ASID dealiases guest GPA mappings in the root TLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		 * Allocate new root ASID if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 			get_new_mmu_context(gpa_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 			check_mmu_context(gpa_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) }
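/*
 * The two branches above implement different sharing strategies:
 *
 *   cpu_has_guestid:  guest TLB entries are tagged with GuestCtl1.ID, so
 *	several guests' mappings can coexist and only a stale or migrated
 *	GuestID forces a flush.
 *   !cpu_has_guestid: the guest TLB holds a single guest's state and is
 *	flushed whenever a different (or migrated) VCPU runs; guest GPA
 *	mappings in the root TLB are dealiased by root ASIDs instead.
 */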
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	bool migrated, all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	 * Have we migrated to a different CPU?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	 * If so, any old guest TLB state may be stale.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	migrated = (vcpu->arch.last_sched_cpu != cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	 * Was this the last VCPU to run on this CPU?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	 * If not, any old guest state from this VCPU will have been clobbered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	all = migrated || (last_vcpu[cpu] != vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	last_vcpu[cpu] = vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	 * Restore CP0_Wired unconditionally as we clear it after use, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	 * restore wired guest TLB entries (while in guest context).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	kvm_restore_gc0_wired(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	if (current->flags & PF_VCPU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		tlbw_use_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		kvm_vz_vcpu_load_tlb(vcpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 		kvm_vz_vcpu_load_wired(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	 * Restore timer state regardless, as e.g. Cause.TI can change over time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	 * if left unmaintained.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	kvm_vz_restore_timer(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	/* Set MC bit if we want to trace guest mode changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	if (kvm_trace_guest_mode_change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 		set_c0_guestctl0(MIPS_GCTL0_MC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		clear_c0_guestctl0(MIPS_GCTL0_MC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	/* Don't bother restoring registers multiple times unless necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	if (!all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	 * Restore config registers first, as some implementations restrict
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	 * writes to other registers when the corresponding feature bits aren't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	kvm_restore_gc0_config(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	if (cpu_guest_has_conf1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		kvm_restore_gc0_config1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	if (cpu_guest_has_conf2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 		kvm_restore_gc0_config2(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	if (cpu_guest_has_conf3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		kvm_restore_gc0_config3(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	if (cpu_guest_has_conf4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 		kvm_restore_gc0_config4(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	if (cpu_guest_has_conf5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		kvm_restore_gc0_config5(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	if (cpu_guest_has_conf6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 		kvm_restore_gc0_config6(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	if (cpu_guest_has_conf7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 		kvm_restore_gc0_config7(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	kvm_restore_gc0_index(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	kvm_restore_gc0_entrylo0(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	kvm_restore_gc0_entrylo1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	kvm_restore_gc0_context(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	if (cpu_guest_has_contextconfig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 		kvm_restore_gc0_contextconfig(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	kvm_restore_gc0_xcontext(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	if (cpu_guest_has_contextconfig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 		kvm_restore_gc0_xcontextconfig(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	kvm_restore_gc0_pagemask(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	kvm_restore_gc0_pagegrain(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	kvm_restore_gc0_hwrena(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	kvm_restore_gc0_badvaddr(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	kvm_restore_gc0_entryhi(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	kvm_restore_gc0_status(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	kvm_restore_gc0_intctl(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	kvm_restore_gc0_epc(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	if (cpu_guest_has_userlocal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		kvm_restore_gc0_userlocal(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	kvm_restore_gc0_errorepc(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	/* restore KScratch registers if enabled in guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	if (cpu_guest_has_conf4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		if (cpu_guest_has_kscr(2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 			kvm_restore_gc0_kscratch1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 		if (cpu_guest_has_kscr(3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 			kvm_restore_gc0_kscratch2(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 		if (cpu_guest_has_kscr(4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 			kvm_restore_gc0_kscratch3(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 		if (cpu_guest_has_kscr(5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 			kvm_restore_gc0_kscratch4(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 		if (cpu_guest_has_kscr(6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 			kvm_restore_gc0_kscratch5(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 		if (cpu_guest_has_kscr(7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 			kvm_restore_gc0_kscratch6(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	if (cpu_guest_has_badinstr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 		kvm_restore_gc0_badinstr(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	if (cpu_guest_has_badinstrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 		kvm_restore_gc0_badinstrp(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	if (cpu_guest_has_segments) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 		kvm_restore_gc0_segctl0(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 		kvm_restore_gc0_segctl1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 		kvm_restore_gc0_segctl2(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	/* restore HTW registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		kvm_restore_gc0_pwbase(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		kvm_restore_gc0_pwfield(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		kvm_restore_gc0_pwsize(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 		kvm_restore_gc0_pwctl(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	if (cpu_has_guestctl2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 		write_c0_guestctl2(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	 * Clear the linked-load bit to break interrupted atomics. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	 * prevents an SC on the next VCPU from succeeding by matching an LL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	 * performed by the previous VCPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	if (vcpu->kvm->created_vcpus > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 		write_gc0_lladdr(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) }
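/*
 * Note the asymmetry with kvm_vz_vcpu_put() below: the bulk restore above
 * is skipped when this VCPU was already resident on this CPU, but the put
 * path always saves everything, since the hardware copy may be clobbered
 * by whichever VCPU runs next.
 */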
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	if (current->flags & PF_VCPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 		kvm_vz_vcpu_save_wired(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	kvm_lose_fpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	kvm_save_gc0_index(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	kvm_save_gc0_entrylo0(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	kvm_save_gc0_entrylo1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	kvm_save_gc0_context(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	if (cpu_guest_has_contextconfig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 		kvm_save_gc0_contextconfig(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	kvm_save_gc0_xcontext(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	if (cpu_guest_has_contextconfig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 		kvm_save_gc0_xcontextconfig(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	kvm_save_gc0_pagemask(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	kvm_save_gc0_pagegrain(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	kvm_save_gc0_wired(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	/* allow wired TLB entries to be overwritten */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 	clear_gc0_wired(MIPSR6_WIRED_WIRED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	kvm_save_gc0_hwrena(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	kvm_save_gc0_badvaddr(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	kvm_save_gc0_entryhi(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	kvm_save_gc0_status(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	kvm_save_gc0_intctl(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	kvm_save_gc0_epc(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	if (cpu_guest_has_userlocal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		kvm_save_gc0_userlocal(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	/* only save implemented config registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	kvm_save_gc0_config(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	if (cpu_guest_has_conf1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 		kvm_save_gc0_config1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	if (cpu_guest_has_conf2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		kvm_save_gc0_config2(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	if (cpu_guest_has_conf3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 		kvm_save_gc0_config3(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	if (cpu_guest_has_conf4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		kvm_save_gc0_config4(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	if (cpu_guest_has_conf5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 		kvm_save_gc0_config5(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	if (cpu_guest_has_conf6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 		kvm_save_gc0_config6(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	if (cpu_guest_has_conf7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		kvm_save_gc0_config7(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	kvm_save_gc0_errorepc(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	/* save KScratch registers if enabled in guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	if (cpu_guest_has_conf4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		if (cpu_guest_has_kscr(2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 			kvm_save_gc0_kscratch1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		if (cpu_guest_has_kscr(3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 			kvm_save_gc0_kscratch2(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 		if (cpu_guest_has_kscr(4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 			kvm_save_gc0_kscratch3(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 		if (cpu_guest_has_kscr(5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 			kvm_save_gc0_kscratch4(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 		if (cpu_guest_has_kscr(6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 			kvm_save_gc0_kscratch5(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 		if (cpu_guest_has_kscr(7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 			kvm_save_gc0_kscratch6(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	if (cpu_guest_has_badinstr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		kvm_save_gc0_badinstr(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	if (cpu_guest_has_badinstrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 		kvm_save_gc0_badinstrp(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	if (cpu_guest_has_segments) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		kvm_save_gc0_segctl0(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 		kvm_save_gc0_segctl1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 		kvm_save_gc0_segctl2(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	/* save HTW registers if enabled in guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		kvm_save_gc0_pwbase(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 		kvm_save_gc0_pwfield(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 		kvm_save_gc0_pwsize(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 		kvm_save_gc0_pwctl(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	kvm_vz_save_timer(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	if (cpu_has_guestctl2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 			read_c0_guestctl2();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)  * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)  * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)  * Attempt to resize the guest VTLB by writing guest Config registers. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)  * necessary for cores with a shared root/guest TLB to avoid overlap with wired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)  * entries in the root VTLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)  * Returns:	The resulting guest VTLB size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	unsigned int config4 = 0, ret = 0, limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	/* Write MMUSize - 1 into guest Config registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	if (cpu_guest_has_conf1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 		change_gc0_config1(MIPS_CONF1_TLBS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	if (cpu_guest_has_conf4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 		config4 = read_gc0_config4();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 				MIPS_CONF4_MMUSIZEEXT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		write_gc0_config4(config4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	 * it would exceed Root.Wired.Limit (also clearing Guest.Wired.Wired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	 * so the write is not dropped).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	if (cpu_has_mips_r6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 						MIPSR6_WIRED_LIMIT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 		if (size - 1 <= limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 			limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	/* Read back MMUSize - 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	back_to_back_c0_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	if (cpu_guest_has_conf1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 						MIPS_CONF1_TLBS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	if (config4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 				MIPS_CONF1_TLBS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 				MIPS_CONF1_TLBS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	return ret + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) }
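/*
 * Worked example of the logic above, assuming MIPS_CONF1_TLBS_SIZE == 6
 * (the architectural width of Config1.MMUSize): requesting size 128 writes
 * 127 & 0x3f == 63 to Config1.MMUSize and 127 >> 6 == 1 to the
 * VTLBSizeExt/MMUSizeExt field, so the read-back yields (63 | 1 << 6) + 1
 * == 128. A core that ignores the Config4 write reports 64 instead, which
 * is why callers must trust the returned size, not the requested one.
 */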
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 
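/*
 * The hardware enable/disable callbacks below run on each physical CPU
 * via the generic KVM machinery (typically when the first VM is created
 * and when the last one goes away), so they may only touch per-CPU state.
 */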
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) static int kvm_vz_hardware_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	unsigned int mmu_size, guest_mmu_size, ftlb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	u64 guest_cvmctl, cvmvmconfig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	case CPU_CAVIUM_OCTEON3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 		/* Set up guest timer/perfcount IRQ lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 		guest_cvmctl = read_gc0_cvmctl();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 		guest_cvmctl &= ~CVMCTL_IPTI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		guest_cvmctl &= ~CVMCTL_IPPCI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 		write_gc0_cvmctl(guest_cvmctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 		cvmvmconfig = read_c0_cvmvmconfig();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 		/* No I/O hole translation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 		cvmvmconfig |= CVMVMCONF_DGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 		/* Halve the root MMU size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 		guest_mmu_size = mmu_size / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		mmu_size -= guest_mmu_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		cvmvmconfig |= mmu_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 		write_c0_cvmvmconfig(cvmvmconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 		/* Update our records */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 		current_cpu_data.tlbsize = mmu_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 		current_cpu_data.tlbsizevtlb = mmu_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		current_cpu_data.guest.tlbsize = guest_mmu_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 		/* Flush moved entries in new (guest) context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		kvm_vz_local_flush_guesttlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 		 * ImgTec cores tend to use a shared root/guest TLB. To avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 		 * overlap of root wired and guest entries, the guest TLB may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 		 * need resizing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 		mmu_size = current_cpu_data.tlbsizevtlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 		ftlb_size = current_cpu_data.tlbsize - mmu_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 		/* Try switching to maximum guest VTLB size for flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 		guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 		kvm_vz_local_flush_guesttlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 		 * Reduce to make space for root wired entries and at least 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 		 * root non-wired entries. This does assume that long-term wired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 		 * entries won't be added later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 		guest_mmu_size = mmu_size - num_wired_entries() - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 		guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 		 * Write the VTLB size, but if another CPU has already written,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 		 * check it matches or we won't provide a consistent view to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 		 * guest. If this ever happens it suggests an asymmetric number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 		 * of wired entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 		if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 		    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 			 "Available guest VTLB size mismatch"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 	 * Enable virtualization features granting guest direct control of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	 * certain features:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	 * CP0=1:	Guest coprocessor 0 context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	 * AT=Guest:	Guest MMU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	 * CG=1:	Hit (virtual address) CACHE operations (optional).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	 * CF=1:	Guest Config registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	 * CGI=1:	Indexed flush CACHE operations (optional).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	write_c0_guestctl0(MIPS_GCTL0_CP0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	if (cpu_has_guestctl0ext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 		if (current_cpu_type() != CPU_LOONGSON64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 			set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 			clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	if (cpu_has_guestid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 		write_c0_guestctl1(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 		kvm_vz_local_flush_roottlb_all_guests();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 		GUESTID_MASK = current_cpu_data.guestid_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 		GUESTID_VERSION_MASK = ~GUESTID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	/* clear any pending injected virtual guest interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	if (cpu_has_guestctl2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		clear_c0_guestctl2(0x3f << 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) #ifdef CONFIG_CPU_LOONGSON64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 	/* Control guest CCA attribute */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 	if (cpu_has_csr())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 		csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) static void kvm_vz_hardware_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	u64 cvmvmconfig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	unsigned int mmu_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	/* Flush any remaining guest TLB entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 	kvm_vz_local_flush_guesttlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	case CPU_CAVIUM_OCTEON3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 		 * Allocate the whole TLB to the root context. Existing guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 		 * TLB entries change ownership to the root TLB; this is safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 		 * as they were already flushed above while in the guest TLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 		cvmvmconfig = read_c0_cvmvmconfig();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 		cvmvmconfig |= mmu_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 		write_c0_cvmvmconfig(cvmvmconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 		/* Update our records */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 		current_cpu_data.tlbsize = mmu_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 		current_cpu_data.tlbsizevtlb = mmu_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 		current_cpu_data.guest.tlbsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 		/* Flush moved entries in new (root) context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 		local_flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	if (cpu_has_guestid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 		write_c0_guestctl1(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 		kvm_vz_local_flush_roottlb_all_guests();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) static int kvm_vz_check_extension(struct kvm *kvm, long ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	switch (ext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	case KVM_CAP_MIPS_VZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 		/* we wouldn't be here unless cpu_has_vz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 		r = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	case KVM_CAP_MIPS_64BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 		/* We support 64-bit registers/operations and addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 		r = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	case KVM_CAP_IOEVENTFD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 		r = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 		r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) }
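/*
 * Userspace view (illustrative sketch, not part of this file): these
 * capabilities are probed through the generic KVM_CHECK_EXTENSION ioctl:
 *
 *	int kvm = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ) > 0)
 *		vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_MIPS_VZ);
 *
 * A return of 2 for KVM_CAP_MIPS_64BIT advertises the 64-bit support
 * reported above.
 */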
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	for_each_possible_cpu(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 		vcpu->arch.vzguestid[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) }
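/*
 * Zeroing vzguestid[] above guarantees that the version check in
 * kvm_vz_vcpu_load_tlb() fails on first load, so a fresh GuestID is
 * allocated on each CPU this VCPU first runs on.
 */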
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 	 * If the VCPU is freed and reused as another VCPU, we don't want the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	 * matching pointer wrongly hanging around in last_vcpu[] or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 	 * last_exec_vcpu[].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 		if (last_vcpu[cpu] == vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 			last_vcpu[cpu] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 		if (last_exec_vcpu[cpu] == vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 			last_exec_vcpu[cpu] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	 * Start the timer at the same frequency as the host timer, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	 * soft timer does not yet handle frequencies greater than 1 GHz.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 		count_hz = mips_hpt_frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	kvm_mips_init_count(vcpu, count_hz);
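	/*
	 * kvm_mips_init_count() seeds the guest CP0_Count emulation with
	 * count_hz; userspace can retune it later through the
	 * KVM_REG_MIPS_COUNT_HZ register handled above.
	 */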
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	 * Initialize guest register state to valid architectural reset state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	/* PageGrain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 	if (cpu_has_mips_r5 || cpu_has_mips_r6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	/* Wired */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	if (cpu_has_mips_r6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 		kvm_write_sw_gc0_wired(cop0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 	/* Status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 	if (cpu_has_mips_r5 || cpu_has_mips_r6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 	/* IntCtl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	/* PRId */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 	/* EBase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	/* Config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	kvm_save_gc0_config(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	/* architecturally writable (e.g. from guest) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 				 _page_cachable_default >> _CACHE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	/* architecturally read only, but maybe writable from root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	if (cpu_guest_has_conf1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 		/* Config1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 		kvm_save_gc0_config1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 		/* architecturally read only, but maybe writable from root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 					       MIPS_CONF1_MD	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 					       MIPS_CONF1_PC	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 					       MIPS_CONF1_WR	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 					       MIPS_CONF1_CA	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 					       MIPS_CONF1_FP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 	if (cpu_guest_has_conf2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 		/* Config2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		kvm_save_gc0_config2(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	if (cpu_guest_has_conf3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 		/* Config3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 		kvm_save_gc0_config3(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 		/* architecturally writable (e.g. from guest) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 		/* architecturally read only, but maybe writable from root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 					       MIPS_CONF3_BPG	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 					       MIPS_CONF3_ULRI	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 					       MIPS_CONF3_DSP	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 					       MIPS_CONF3_CTXTC	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 					       MIPS_CONF3_ITL	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 					       MIPS_CONF3_LPA	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 					       MIPS_CONF3_VEIC	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 					       MIPS_CONF3_VINT	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 					       MIPS_CONF3_SP	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 					       MIPS_CONF3_CDMM	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 					       MIPS_CONF3_MT	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 					       MIPS_CONF3_SM	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 					       MIPS_CONF3_TL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 	if (cpu_guest_has_conf4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 		/* Config4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 		kvm_save_gc0_config4(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	if (cpu_guest_has_conf5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 		/* Config5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		kvm_save_gc0_config5(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 		/* architecturally writable (e.g. from guest) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 					       MIPS_CONF5_CV	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 					       MIPS_CONF5_MSAEN	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 					       MIPS_CONF5_UFE	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 					       MIPS_CONF5_FRE	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 					       MIPS_CONF5_SBRI	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 					       MIPS_CONF5_UFR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 		/* architecturally read only, but maybe writable from root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	if (cpu_guest_has_contextconfig) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 		/* ContextConfig */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 		/* XContextConfig */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 		/* bits SEGBITS-13+3:4 set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		kvm_write_sw_gc0_xcontextconfig(cop0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 					((1ull << (cpu_vmbits - 13)) - 1) << 4);
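		/*
		 * e.g. with cpu_vmbits == 40: ((1ull << 27) - 1) << 4 ==
		 * 0x7ffffff0, i.e. bits 30:4 as the comment above says;
		 * the 32-bit ContextConfig value 0x007ffff0 sets bits 22:4
		 * the same way.
		 */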
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 	/* Implementation dependent, use the legacy layout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 	if (cpu_guest_has_segments) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 		/* SegCtl0, SegCtl1, SegCtl2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 				(_page_cachable_default >> _CACHE_SHIFT) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 						(16 + MIPS_SEGCFG_C_SHIFT));
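		/*
		 * Assuming MIPS_SEGCFG_C_SHIFT == 0, a cacheable default of
		 * C == 3 gives SegCtl1 = 0x00000002 | (3 << 16) == 0x00030002:
		 * the lower segment config stays uncached (C == 2) while the
		 * upper one takes the kernel's default cacheability.
		 */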
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 	/* reset HTW registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 		/* PWField */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
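		/*
		 * Under the usual PWField layout (6-bit GDI/UDI/MDI/PTI
		 * fields above a 6-bit PTEI), 0x0c30c302 decodes as
		 * GDI = UDI = MDI = PTI = 12 and PTEI = 2.
		 */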
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 		/* PWSize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 		kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	/* start with no pending virtual guest interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 	if (cpu_has_guestctl2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	/* Put PC at reset vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
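	/*
	 * On a 64-bit kernel, CKSEG1ADDR(0x1fc00000) == 0xffffffffbfc00000
	 * (0xbfc00000 in a 32-bit view): the architectural MIPS reset
	 * vector, physical address 0x1fc00000 seen through the uncached
	 * (c)kseg1 window.
	 */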
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) }
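The reset PC installed above is observable from user space through KVM's
one-reg API. A minimal sketch, assuming a MIPS host whose uapi headers
define KVM_REG_MIPS_PC, with error handling omitted for brevity:

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);		/* default machine type */
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);	/* vcpu_id 0 */
	uint64_t pc = 0;
	struct kvm_one_reg reg = {
		.id = KVM_REG_MIPS_PC,
		.addr = (uintptr_t)&pc,
	};

	/* Expect the CKSEG1ADDR(0x1fc00000) value set by vcpu_setup. */
	if (ioctl(vcpu, KVM_GET_ONE_REG, &reg) == 0)
		printf("reset pc = 0x%llx\n", (unsigned long long)pc);
	return 0;
}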
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) static void kvm_vz_flush_shadow_all(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	if (cpu_has_guestid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 		/* Flush GuestID for each VCPU individually */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 		kvm_flush_remote_tlbs(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 		 * For each CPU there is a single GPA ASID used by all VCPUs in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 		 * the VM, so it doesn't make sense for the VCPUs to handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 		 * invalidation of these ASIDs individually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 		 * Instead mark all CPUs as needing ASID invalidation in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 		 * kick any running VCPUs so they check asid_flush_mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 		cpumask_setall(&kvm->arch.asid_flush_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 		kvm_flush_remote_tlbs(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) }
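The mark-all-then-kick scheme above pairs with a per-CPU test-and-clear
when a VCPU is next loaded on that CPU. A self-contained C11 sketch of
the same pattern; every name here is invented for the demo, not taken
from the kernel:

#include <stdatomic.h>
#include <stdio.h>

#define DEMO_NR_CPUS 4

static atomic_ulong demo_flush_mask;

/* cf. cpumask_setall(&kvm->arch.asid_flush_mask) plus the remote kick */
static void demo_flush_all(void)
{
	atomic_fetch_or(&demo_flush_mask, (1UL << DEMO_NR_CPUS) - 1);
}

/* cf. testing and clearing this CPU's bit before entering the guest */
static int demo_need_flush(int cpu)
{
	unsigned long bit = 1UL << cpu;

	return !!(atomic_fetch_and(&demo_flush_mask, ~bit) & bit);
}

int main(void)
{
	demo_flush_all();
	printf("cpu1 first entry flushes: %d\n", demo_need_flush(1));	/* 1 */
	printf("cpu1 second entry skips: %d\n", demo_need_flush(1));	/* 0 */
	return 0;
}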
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 
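/*
 * No per-memslot shadow state is tracked here, so flushing a single
 * memslot simply falls back to flushing everything.
 */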
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 					const struct kvm_memory_slot *slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	kvm_vz_flush_shadow_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 
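/*
 * kvm_vz_check_requests() returns nonzero when servicing the pending
 * requests clobbers the guest TLB contents, in which case the wired
 * guest TLB entries are saved before the reload and restored after.
 */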
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 	int preserve_guest_tlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	if (preserve_guest_tlb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 		kvm_vz_vcpu_save_wired(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	kvm_vz_vcpu_load_tlb(vcpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 	if (preserve_guest_tlb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 		kvm_vz_vcpu_load_wired(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 	kvm_vz_acquire_htimer(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 	/* Check if we have any exceptions/interrupts pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 	kvm_vz_check_requests(vcpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 	kvm_vz_vcpu_load_tlb(vcpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 	kvm_vz_vcpu_load_wired(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	r = vcpu->arch.vcpu_run(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 	kvm_vz_vcpu_save_wired(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) static struct kvm_mips_callbacks kvm_vz_callbacks = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	.handle_addr_err_st = kvm_trap_vz_no_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	.handle_addr_err_ld = kvm_trap_vz_no_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 	.handle_syscall = kvm_trap_vz_no_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 	.handle_res_inst = kvm_trap_vz_no_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 	.handle_break = kvm_trap_vz_no_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	.hardware_enable = kvm_vz_hardware_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 	.hardware_disable = kvm_vz_hardware_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	.check_extension = kvm_vz_check_extension,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 	.vcpu_init = kvm_vz_vcpu_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	.vcpu_uninit = kvm_vz_vcpu_uninit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	.vcpu_setup = kvm_vz_vcpu_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	.flush_shadow_all = kvm_vz_flush_shadow_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	.queue_timer_int = kvm_vz_queue_timer_int_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	.queue_io_int = kvm_vz_queue_io_int_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	.irq_deliver = kvm_vz_irq_deliver_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 	.irq_clear = kvm_vz_irq_clear_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	.num_regs = kvm_vz_num_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	.copy_reg_indices = kvm_vz_copy_reg_indices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 	.get_one_reg = kvm_vz_get_one_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	.set_one_reg = kvm_vz_set_one_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 	.vcpu_load = kvm_vz_vcpu_load,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 	.vcpu_put = kvm_vz_vcpu_put,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 	.vcpu_run = kvm_vz_vcpu_run,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 	.vcpu_reenter = kvm_vz_vcpu_reenter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 	if (!cpu_has_vz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	 * VZ requires at least 2 KScratch registers, so it should have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 	 * possible to allocate pgd_reg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 	if (WARN(pgd_reg == -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 		 "pgd_reg not allocated even though cpu_has_vz\n"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 	pr_info("Starting KVM with MIPS VZ extensions\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	*install_callbacks = &kvm_vz_callbacks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) }
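For illustration, the registration pattern used by
kvm_mips_emulation_init() (a generic layer owning a callbacks pointer
that the implementation fills in) can be sketched stand-alone. All
names below are invented for the demo:

#include <stdio.h>
#include <errno.h>

struct demo_callbacks {
	int (*vcpu_run)(int vcpu_id);
};

static int demo_vz_vcpu_run(int vcpu_id)
{
	printf("running vcpu %d\n", vcpu_id);
	return 0;
}

static struct demo_callbacks demo_vz_callbacks = {
	.vcpu_run = demo_vz_vcpu_run,
};

static int demo_emulation_init(struct demo_callbacks **install, int has_vz)
{
	if (!has_vz)
		return -ENODEV;		/* same failure mode as above */
	*install = &demo_vz_callbacks;
	return 0;
}

int main(void)
{
	struct demo_callbacks *cb = NULL;

	/* The generic layer only ever calls through the installed table. */
	if (demo_emulation_init(&cb, 1) == 0)
		cb->vcpu_run(0);
	return 0;
}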