Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

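The listing below is the trap-and-emulate MIPS KVM backend (apparently arch/mips/kvm/trap_emul.c; the path is inferred from the file contents, not stated by the repo).

Building for these RK3588-based boards typically means an arm64 cross-compile. A minimal sketch, assuming an aarch64 cross toolchain and the Rockchip defconfig usually shipped with 5.10 BSP trees (the defconfig name is an assumption, check arch/arm64/configs):

    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- rockchip_linux_defconfig
    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -j$(nproc) Image dtbs modules
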
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"

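/*
 * Translate a guest virtual address to a guest physical address for the
 * unmapped segments: host CKSEG0/CKSEG1 and guest KSEG0 addresses are
 * converted by simply masking off the segment bits; anything else has no
 * fixed mapping and is reported as KVM_INVALID_ADDR.
 */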
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	gva_t kseg = KSEGX(gva);
	gva_t gkseg = KVM_GUEST_KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else if (gkseg == KVM_GUEST_KSEG0)
		gpa = KVM_GUEST_CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}

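/*
 * Fallback handler for exception codes with no dedicated callback: log the
 * faulting instruction and guest state, then exit to userspace with
 * KVM_EXIT_INTERNAL_ERROR.
 */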
static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		kvm_read_c0_guest_status(vcpu->arch.cop0));
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}

static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		vcpu->run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	case EMULATE_HYPERCALL:
		ret = kvm_mips_handle_hypcall(vcpu);
		break;

	default:
		BUG();
	}
	return ret;
}

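/*
 * MMIO load path: fetch the trapping instruction and hand it to the MMIO
 * emulator. On success the exit reason is KVM_EXIT_MMIO and userspace
 * completes the access; either way control returns to the host.
 */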
static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* A code fetch fault doesn't count as an MMIO */
	if (kvm_is_ifetch_fault(&vcpu->arch)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the load */
	er = kvm_mips_emulate_load(inst, cause, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate load from MMIO space failed\n");
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}

static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the store */
	er = kvm_mips_emulate_store(inst, cause, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate store to MMIO space failed\n");
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}

static int kvm_mips_bad_access(u32 cause, u32 *opc,
			       struct kvm_vcpu *vcpu, bool store)
{
	if (store)
		return kvm_mips_bad_store(cause, opc, vcpu);
	else
		return kvm_mips_bad_load(cause, opc, vcpu);
}

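/*
 * TLB modified exception: the guest wrote through a host TLB entry that is
 * mapped read-only. Depending on the guest's own TLB state this either
 * forwards a TLBMOD exception to the guest (its entry wasn't dirty),
 * repopulates the host TLB, or falls back to MMIO emulation.
 */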
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	struct kvm_mips_tlb *tlb;
	unsigned long entryhi;
	int index;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		/*
		 * First find the mapping in the guest TLB. If the failure to
		 * write was due to the guest TLB, it should be up to the guest
		 * to handle it.
		 */
		entryhi = (badvaddr & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
		index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

		/*
		 * These should never happen.
		 * They would indicate stale host TLB entries.
		 */
		if (unlikely(index < 0)) {
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		tlb = vcpu->arch.guest_tlb + index;
		if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/*
		 * Guest entry not dirty? That would explain the TLB modified
		 * exception. Relay that on to the guest so it can handle it.
		 */
		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
			kvm_mips_emulate_tlbmod(cause, opc, vcpu);
			return RESUME_GUEST;
		}

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
							 true))
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, vcpu);
		return RESUME_GUEST;
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, vcpu);
		return RESUME_GUEST;
	} else {
		/* host kernel addresses are all handled as MMIO */
		return kvm_mips_bad_store(cause, opc, vcpu);
	}
}

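/*
 * Common TLB miss handler for loads and stores. Comm page and guest KSEG0
 * misses are filled in by KVM itself; mapped-segment misses are either
 * forwarded to the guest or injected into the shadow host TLB; kernel-mode
 * accesses to host CKSEG0/CKSEG1 are treated as MMIO.
 */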
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
			ret = kvm_mips_bad_access(cause, opc, vcpu, store);
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		ret = kvm_mips_bad_access(cause, opc, vcpu, store);
	} else {
		kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		ret = kvm_mips_bad_store(cause, opc, vcpu);
	} else {
		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		ret = kvm_mips_bad_load(cause, opc, vcpu);
	} else {
		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

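/*
 * The remaining exception handlers (syscall, reserved instruction, break,
 * trap, MSA FPE, FPE) all follow the same pattern: emulate delivery of the
 * corresponding exception to the guest, resume the guest on EMULATE_DONE,
 * and otherwise exit to userspace with an internal error.
 */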
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

static int kvm_trap_emul_hardware_enable(void)
{
	return 0;
}

static void kvm_trap_emul_hardware_disable(void)
{
}

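/*
 * Capabilities specific to this backend, reported via KVM_CHECK_EXTENSION.
 * Userspace would probe them with the usual KVM ioctl, e.g. (a sketch, fd
 * setup elided):
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_TE) > 0)
 *		... trap-and-emulate is available ...
 */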
static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_TE:
		r = 1;
		break;
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

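/*
 * Each vCPU keeps two GVA page tables, one for guest kernel mode and one
 * for guest user mode, so the two guest address spaces stay separate.
 */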
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

	/*
	 * Allocate GVA -> HPA page tables.
	 * MIPS doesn't use the mm_struct pointer argument.
	 */
	kern_mm->pgd = pgd_alloc(kern_mm);
	if (!kern_mm->pgd)
		return -ENOMEM;

	user_mm->pgd = pgd_alloc(user_mm);
	if (!user_mm->pgd) {
		pgd_free(kern_mm, kern_mm->pgd);
		return -ENOMEM;
	}

	return 0;
}

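/*
 * Walk and free a GVA page table allocated above. Only the user half of
 * the address space (below 0x80000000) is freed; the kernel half was
 * copied from init_mm.pgd and is not owned by this table.
 */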
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
	/* Don't free host kernel page tables copied from init_mm.pgd */
	const unsigned long end = 0x80000000;
	unsigned long pgd_va, pud_va, pmd_va;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		if (pgd_none(pgd[i]))
			continue;

		pgd_va = (unsigned long)i << PGDIR_SHIFT;
		if (pgd_va >= end)
			break;
		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d + i, 0);
		for (j = 0; j < PTRS_PER_PUD; j++) {
			if (pud_none(pud[j]))
				continue;

			pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
			if (pud_va >= end)
				break;
			pmd = pmd_offset(pud + j, 0);
			for (k = 0; k < PTRS_PER_PMD; k++) {
				if (pmd_none(pmd[k]))
					continue;

				pmd_va = pud_va | (k << PMD_SHIFT);
				if (pmd_va >= end)
					break;
				pte = pte_offset_kernel(pmd + k, 0);
				pte_free_kernel(NULL, pte);
			}
			pmd_free(NULL, pmd);
		}
		pud_free(NULL, pud);
	}
	pgd_free(NULL, pgd);
}

static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}

static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/* Start off the timer at 100 MHz */
	kvm_mips_init_count(vcpu, 100*1000*1000);

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
	/*
	 * Have config1, Cacheable, noncoherent, write-back, write allocate.
	 * Endianness, arch revision & virtually tagged icache should match
	 * host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* DCache line size not correctly reported in Config1 on Octeon CPUs */
	if (cpu_dcache_line_size()) {
		config1 &= ~MIPS_CONF1_DL;
		config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
			    MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
	}

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/* Status */
	kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	/* Put PC at guest reset vector */
	vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

	return 0;
}

static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
	/* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_trap_emul_flush_shadow_all(kvm);
}

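/*
 * CP0 and timer registers this backend reports to userspace through
 * KVM_GET_REG_LIST, on top of the base register set common to all MIPS
 * KVM implementations.
 */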
static u64 kvm_trap_emul_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					  u64 __user *indices)
{
	if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
			 sizeof(kvm_trap_emul_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

	return 0;
}

static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = kvm_read_c0_guest_entrylo0(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = kvm_read_c0_guest_entrylo1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		*v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = (long)kvm_read_c0_guest_intctl(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = (long)kvm_read_c0_guest_ebase(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		*v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		*v = (long)kvm_read_c0_guest_config2(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	case KVM_REG_MIPS_CP0_CONFIG3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		*v = (long)kvm_read_c0_guest_config3(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	case KVM_REG_MIPS_CP0_CONFIG4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		*v = (long)kvm_read_c0_guest_config4(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	case KVM_REG_MIPS_CP0_CONFIG5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		*v = (long)kvm_read_c0_guest_config5(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	case KVM_REG_MIPS_CP0_CONFIG7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		*v = (long)kvm_read_c0_guest_config7(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	case KVM_REG_MIPS_CP0_COUNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		*v = kvm_mips_read_count(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	case KVM_REG_MIPS_COUNT_CTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		*v = vcpu->arch.count_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	case KVM_REG_MIPS_COUNT_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		*v = ktime_to_ns(vcpu->arch.count_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	case KVM_REG_MIPS_COUNT_HZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		*v = vcpu->arch.count_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	case KVM_REG_MIPS_CP0_ERROREPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		*v = (long)kvm_read_c0_guest_errorepc(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	case KVM_REG_MIPS_CP0_KSCRATCH1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		*v = (long)kvm_read_c0_guest_kscratch1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	case KVM_REG_MIPS_CP0_KSCRATCH2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		*v = (long)kvm_read_c0_guest_kscratch2(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	case KVM_REG_MIPS_CP0_KSCRATCH3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		*v = (long)kvm_read_c0_guest_kscratch3(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	case KVM_REG_MIPS_CP0_KSCRATCH4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		*v = (long)kvm_read_c0_guest_kscratch4(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	case KVM_REG_MIPS_CP0_KSCRATCH5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		*v = (long)kvm_read_c0_guest_kscratch5(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	case KVM_REG_MIPS_CP0_KSCRATCH6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		*v = (long)kvm_read_c0_guest_kscratch6(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 				     const struct kvm_one_reg *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 				     s64 v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	unsigned int cur, change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	switch (reg->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	case KVM_REG_MIPS_CP0_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		kvm_write_c0_guest_index(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	case KVM_REG_MIPS_CP0_ENTRYLO0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		kvm_write_c0_guest_entrylo0(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	case KVM_REG_MIPS_CP0_ENTRYLO1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		kvm_write_c0_guest_entrylo1(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	case KVM_REG_MIPS_CP0_CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		kvm_write_c0_guest_context(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	case KVM_REG_MIPS_CP0_USERLOCAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		kvm_write_c0_guest_userlocal(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	case KVM_REG_MIPS_CP0_PAGEMASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		kvm_write_c0_guest_pagemask(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	case KVM_REG_MIPS_CP0_WIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		kvm_write_c0_guest_wired(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	case KVM_REG_MIPS_CP0_HWRENA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		kvm_write_c0_guest_hwrena(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	case KVM_REG_MIPS_CP0_BADVADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		kvm_write_c0_guest_badvaddr(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	case KVM_REG_MIPS_CP0_ENTRYHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		kvm_write_c0_guest_entryhi(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	case KVM_REG_MIPS_CP0_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		kvm_write_c0_guest_status(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	case KVM_REG_MIPS_CP0_INTCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		/* No VInt, so no vector spacing (VS) field; read-only for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	case KVM_REG_MIPS_CP0_EPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		kvm_write_c0_guest_epc(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	case KVM_REG_MIPS_CP0_PRID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		kvm_write_c0_guest_prid(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	case KVM_REG_MIPS_CP0_EBASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		 * Allow the core number to be written, but the exception base must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		 * remain in guest KSeg0; bits outside the mask are silently dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 					  v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	case KVM_REG_MIPS_CP0_COUNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		kvm_mips_write_count(vcpu, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	case KVM_REG_MIPS_CP0_COMPARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		kvm_mips_write_compare(vcpu, v, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	case KVM_REG_MIPS_CP0_CAUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		 * If the timer is stopped or started (DC bit) it must look
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		 * A timer interrupt should not happen in between.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			if (v & CAUSEF_DC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 				/* disable timer first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 				kvm_mips_count_disable_cause(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 				kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 							  v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 				/* enable timer last */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 				kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 							  v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 				kvm_mips_count_enable_cause(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 			kvm_write_c0_guest_cause(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	case KVM_REG_MIPS_CP0_CONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		/* read-only for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	case KVM_REG_MIPS_CP0_CONFIG1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		cur = kvm_read_c0_guest_config1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		if (change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			v = cur ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			kvm_write_c0_guest_config1(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	case KVM_REG_MIPS_CP0_CONFIG2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		/* read-only for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	case KVM_REG_MIPS_CP0_CONFIG3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		cur = kvm_read_c0_guest_config3(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		if (change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			v = cur ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			kvm_write_c0_guest_config3(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	case KVM_REG_MIPS_CP0_CONFIG4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		cur = kvm_read_c0_guest_config4(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		if (change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			v = cur ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			kvm_write_c0_guest_config4(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	case KVM_REG_MIPS_CP0_CONFIG5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		cur = kvm_read_c0_guest_config5(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		if (change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			v = cur ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 			kvm_write_c0_guest_config5(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	case KVM_REG_MIPS_CP0_CONFIG7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		/* writes ignored */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	case KVM_REG_MIPS_COUNT_CTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		ret = kvm_mips_set_count_ctl(vcpu, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	case KVM_REG_MIPS_COUNT_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		ret = kvm_mips_set_count_resume(vcpu, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	case KVM_REG_MIPS_COUNT_HZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		ret = kvm_mips_set_count_hz(vcpu, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	case KVM_REG_MIPS_CP0_ERROREPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		kvm_write_c0_guest_errorepc(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	case KVM_REG_MIPS_CP0_KSCRATCH1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		kvm_write_c0_guest_kscratch1(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	case KVM_REG_MIPS_CP0_KSCRATCH2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		kvm_write_c0_guest_kscratch2(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	case KVM_REG_MIPS_CP0_KSCRATCH3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		kvm_write_c0_guest_kscratch3(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	case KVM_REG_MIPS_CP0_KSCRATCH4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		kvm_write_c0_guest_kscratch4(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	case KVM_REG_MIPS_CP0_KSCRATCH5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		kvm_write_c0_guest_kscratch5(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	case KVM_REG_MIPS_CP0_KSCRATCH6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		kvm_write_c0_guest_kscratch6(cop0, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
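/*
 * The CONFIG1/3/4/5 cases above share one masked-update idiom: only the
 * bits covered by the per-register write mask may change, and v is
 * recomputed so every read-only bit keeps its current value. With
 * hypothetical 4-bit values for illustration:
 *
 *	cur    = 0b1010			current guest register value
 *	v      = 0b0101			value requested by userspace
 *	mask   = 0b0011			writable bits for this register
 *	change = (cur ^ v) & mask	= 0b0011, writable bits that differ
 *	v      = cur ^ change		= 0b1001, read-only bits preserved
 */
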
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	 * Were we in guest context? If so, restore the appropriate ASID based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	 * on the mode of the Guest (Kernel/User).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	if (current->flags & PF_VCPU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		check_switch_mmu_context(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		kvm_mips_suspend_mm(cpu);
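		/* clear CP0 execution hazards before the new ASID is used */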
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		ehb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	kvm_lose_fpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	if (current->flags & PF_VCPU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		/* Restore normal Linux process memory map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		check_switch_mmu_context(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		kvm_mips_resume_mm(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		ehb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 					 bool reload_asid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	if (likely(!kvm_request_pending(vcpu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		 * Both kernel & user GVA mappings must be invalidated. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		 * caller is just about to check whether the ASID is stale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		 * anyway, so there is no need to reload it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
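		/*
		 * Zero the saved MMU context on every CPU so the old ASIDs
		 * are treated as stale and regenerated on next use.
		 */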
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			set_cpu_context(i, kern_mm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			set_cpu_context(i, user_mm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		/* Generate new ASID for current mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		if (reload_asid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			get_new_mmu_context(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			htw_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			write_c0_entryhi(cpu_asid(cpu, mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			htw_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  * @vcpu:	VCPU pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  * Call before a GVA space access outside of guest mode, to ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  * asynchronous TLB flush requests are handled or delayed until completion of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  * Should be called with IRQs already enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	/* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	WARN_ON_ONCE(irqs_disabled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	 * The caller is about to access the GVA space, so we set the mode to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	 * force TLB flush requests to send an IPI, and also disable IRQs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	 * Make sure the read of VCPU requests is not reordered ahead of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	 * write to vcpu->mode, or we could miss a TLB flush request while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	 * the requester sees the VCPU as outside of guest mode and not needing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	 * an IPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	 * If a TLB flush has been requested (potentially while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	 * before accessing the GVA space, and be sure to reload the ASID if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	 * necessary as it'll be immediately used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	 * TLB flush requests after this check will trigger an IPI due to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	 * mode change above, which will be delayed due to IRQs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)  * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  * @vcpu:	VCPU pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)  * Called after a GVA space access outside of guest mode. Should have a matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)  * call to kvm_trap_emul_gva_lockless_begin().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	 * Make sure the write to vcpu->mode is not reordered in front of GVA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	 * accesses, or a TLB flush requester may not think it necessary to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	 * an IPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	 * Now that the access to GVA space is complete, it's safe for pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	 * TLB flush request IPIs to be handled (which indicates completion).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
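/*
 * Hedged usage sketch of the pair above (illustration only; the helper
 * and "resolve_gva_fault" are hypothetical names, not this port's real
 * API). With page faulting disabled, as kvm_trap_emul_vcpu_run() below
 * arranges, a missing mapping makes get_user() fail with -EFAULT, so
 * the caller resolves the fault and retries the bracketed access.
 */
#if 0	/* illustration only, never compiled */
static int peek_guest_word(struct kvm_vcpu *vcpu, u32 __user *gva, u32 *out)
{
	int err;

	do {
		kvm_trap_emul_gva_lockless_begin(vcpu);
		err = get_user(*out, gva);
		kvm_trap_emul_gva_lockless_end(vcpu);

		/* hypothetical fault handler; returns 0 on success */
		if (err && resolve_gva_fault(vcpu, (unsigned long)gva))
			return -EFAULT;
	} while (err);

	return 0;
}
#endif
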
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	int i, cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	unsigned int gasid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	 * No need to reload the ASID here: IRQs are already disabled, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	 * there's no rush, and we check below anyway whether regeneration is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	 * needed before re-entering the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	kvm_trap_emul_check_requests(vcpu, cpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	if (KVM_GUEST_KERNEL_MODE(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		mm = kern_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		mm = user_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		 * Lazy host ASID regeneration / PT flush for guest user mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		 * If the guest ASID has changed since the last guest usermode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		 * execution, invalidate the stale TLB entries and flush GVA PT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		 * entries too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		if (gasid != vcpu->arch.last_user_gasid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			for_each_possible_cpu(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 				set_cpu_context(i, user_mm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 			vcpu->arch.last_user_gasid = gasid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	 * Check if ASID is stale. This may happen due to a TLB flush request or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	 * a lazy user MM invalidation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	check_mmu_context(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	/* Check if we have any exceptions/interrupts pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	kvm_mips_deliver_interrupts(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	kvm_trap_emul_vcpu_reenter(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	 * We use user accessors to access guest memory, but we don't want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	 * invoke Linux page faulting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	pagefault_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	/* Disable hardware page table walking while in guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	htw_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	 * While in guest context we're in the guest's address space, not the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	 * host process address space, so we need to be careful not to confuse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	 * e.g. cache management IPIs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	kvm_mips_suspend_mm(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	r = vcpu->arch.vcpu_run(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	/* We may have migrated while handling guest exits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	/* Restore normal Linux process memory map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	check_switch_mmu_context(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	kvm_mips_resume_mm(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	htw_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	pagefault_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	/* exit handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	.handle_syscall = kvm_trap_emul_handle_syscall,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	.handle_res_inst = kvm_trap_emul_handle_res_inst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	.handle_break = kvm_trap_emul_handle_break,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	.handle_trap = kvm_trap_emul_handle_trap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	.handle_fpe = kvm_trap_emul_handle_fpe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	.handle_guest_exit = kvm_trap_emul_no_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	.hardware_enable = kvm_trap_emul_hardware_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	.hardware_disable = kvm_trap_emul_hardware_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	.check_extension = kvm_trap_emul_check_extension,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	.vcpu_init = kvm_trap_emul_vcpu_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	.vcpu_setup = kvm_trap_emul_vcpu_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	.flush_shadow_all = kvm_trap_emul_flush_shadow_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	.flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	.queue_timer_int = kvm_mips_queue_timer_int_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	.queue_io_int = kvm_mips_queue_io_int_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	.irq_deliver = kvm_mips_irq_deliver_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	.irq_clear = kvm_mips_irq_clear_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	.num_regs = kvm_trap_emul_num_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	.get_one_reg = kvm_trap_emul_get_one_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	.set_one_reg = kvm_trap_emul_set_one_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	.vcpu_load = kvm_trap_emul_vcpu_load,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	.vcpu_put = kvm_trap_emul_vcpu_put,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	.vcpu_run = kvm_trap_emul_vcpu_run,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	.vcpu_reenter = kvm_trap_emul_vcpu_reenter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	*install_callbacks = &kvm_trap_emul_callbacks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
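
/*
 * Hedged note: the expected consumer of this entry point is
 * kvm_arch_init() in arch/mips/kvm/mips.c, which in this kernel series
 * does roughly
 *
 *	return kvm_mips_emulation_init(&kvm_mips_callbacks);
 *
 * so the trap-and-emulate callback table above becomes the active MIPS
 * KVM backend.
 */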