/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

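/*
 * Include <asm/r4kcache.h> with CONFIG_MIPS_MT hidden, so the header sees it
 * undefined and provides its plain (non-MT) cache op variants.
 */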
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "commpage.h"

#include "trace.h"

/*
 * Compute the return address and emulate the branch, if required. This
 * function should only be called when the faulting instruction lies in a
 * branch delay slot (CP0_Cause.BD set).
 */
static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
				  unsigned long *out)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc;
	int err;

	if (epc & 3) {
		kvm_err("%s: unaligned epc\n", __func__);
		return -EINVAL;
	}

	/* Read the instruction */
	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
	if (err)
		return err;

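	/*
	 * MIPS branches are relative to the delay slot at epc + 4: a taken
	 * branch targets epc + 4 + (sign-extended offset << 2), while a
	 * not-taken branch continues at epc + 8, skipping the delay slot.
	 */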
	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			fallthrough;
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		default:
			return -EINVAL;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp) {
				kvm_err("%s: DSP branch but not DSP ASE\n",
					__func__);
				return -EINVAL;
			}

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		default:
			return -EINVAL;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
		fallthrough;
	case j_op:
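		/*
		 * Jumps are pseudo-absolute: target << 2 replaces the low
		 * 28 bits of the delay slot address (epc + 4), preserving
		 * the upper 4 bits of the current 256MB region.
		 */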
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:	/* POP06 */
#ifndef CONFIG_CPU_MIPSR6
	case blezl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:	/* POP07 */
#ifndef CONFIG_CPU_MIPSR6
	case bgtzl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		return -EINVAL;

#ifdef CONFIG_CPU_MIPSR6
	/* R6 added the following compact branches with forbidden slots */
	case blezl_op:	/* POP26 */
	case bgtzl_op:	/* POP27 */
		/* only rt == 0 isn't compact branch */
		if (insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop10_op:
	case pop30_op:
		/* only rs == rt == 0 is reserved, rest are compact branches */
		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop66_op:
	case pop76_op:
		/* only rs == 0 isn't compact branch */
		if (insn.i_format.rs != 0)
			goto compact_branch;
		return -EINVAL;
compact_branch:
		/*
		 * If we've hit an exception on the forbidden slot, then
		 * the branch must not have been taken.
		 */
		epc += 8;
		nextpc = epc;
		break;
#else
compact_branch:
		/* Fall through - Compact branches not supported before R6 */
#endif
	default:
		return -EINVAL;
	}

	*out = nextpc;
	return 0;
}

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	int err;

	if (cause & CAUSEF_BD) {
		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
					     &vcpu->arch.pc);
		if (err)
			return EMULATE_FAIL;
	} else {
		vcpu->arch.pc += 4;
	}

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return EMULATE_DONE;
}

/**
 * kvm_get_badinstr() - Get bad instruction encoding.
 * @opc:	Guest pointer to faulting instruction.
 * @vcpu:	KVM VCPU information.
 *
 * Gets the instruction encoding of the faulting instruction, using the saved
 * BadInstr register value if it exists, otherwise falling back to reading guest
 * memory at @opc.
 *
 * Returns:	0 on success, with the instruction encoding written to @out,
 *		otherwise an error code.
 */
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstr) {
		*out = vcpu->arch.host_cp0_badinstr;
		return 0;
	} else {
		return kvm_get_inst(opc, vcpu, out);
	}
}

/**
 * kvm_get_badinstrp() - Get bad prior instruction encoding.
 * @opc:	Guest pointer to prior faulting instruction.
 * @vcpu:	KVM VCPU information.
 *
 * Gets the instruction encoding of the prior faulting instruction (the branch
 * containing the delay slot which faulted), using the saved BadInstrP register
 * value if it exists, otherwise falling back to reading guest memory at @opc.
 *
 * Returns:	0 on success, with the instruction encoding written to @out,
 *		otherwise an error code.
 */
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstrp) {
		*out = vcpu->arch.host_cp0_badinstrp;
		return 0;
	} else {
		return kvm_get_inst(opc, vcpu, out);
	}
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *	delta < count_period
	 *
	 * Therefore the intermediate delta*count_hz will never overflow since
	 * at the boundary condition:
	 *	delta = count_period
	 *	delta = NSEC_PER_SEC * 2^32 / count_hz
	 *	delta * count_hz = NSEC_PER_SEC * 2^32
	 */
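	/*
	 * Numerically, NSEC_PER_SEC * 2^32 = 10^9 * 2^32 ~= 4.3 * 10^18,
	 * comfortably below U64_MAX (~1.8 * 10^19), whatever count_hz is.
	 */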
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register value at time @now, handling
 * any timer interrupt that is pending but has not yet been processed.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t expires, threshold;
	u32 count, compare;
	int running;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);

	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it.
	 */
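	/*
	 * The signed 32-bit difference makes this wrap-safe: count is treated
	 * as having reached compare while (count - compare) mod 2^32 < 2^31.
	 */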
	if ((s32)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	return count;
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the timer
 * is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}
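
/*
 * Typical freeze/resume usage (a sketch; kvm_mips_set_count_hz() below is a
 * real caller):
 *
 *	now = kvm_mips_freeze_hrtimer(vcpu, &count);
 *	... change timer parameters ...
 *	kvm_mips_resume_hrtimer(vcpu, now, count);
 */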

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(u32)(compare - count - 1) + 1;
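	/*
	 * E.g. compare == count gives (u32)(compare - count - 1) + 1 = 2^32
	 * ticks, a full period, rather than an immediate zero-length timeout.
	 */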
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

/**
 * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
 * @vcpu:	Virtual CPU.
 * @before:	Time before Count was saved, lower bound of drift calculation.
 * @count:	CP0_Count at point of restore.
 * @min_drift:	Minimum amount of drift permitted before correction.
 *		Must be <= 0.
 *
 * Restores the timer from a particular @count, accounting for drift. This can
 * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer
 * is to be used for a period of time, but the exact ktime corresponding to the
 * final Count that must be restored is not known.
 *
 * It is guaranteed that a timer interrupt immediately after restore will be
 * handled, but not if CP0_Compare is exactly at @count. That case should
 * already be handled when the hardware timer state is saved.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
 * stopped).
 *
 * Returns:	Amount of correction to count_bias due to drift.
 */
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift)
{
	ktime_t now, count_time;
	u32 now_count, before_count;
	u64 delta;
	int drift, ret = 0;

	/* Calculate expected count at before */
	before_count = vcpu->arch.count_bias +
			kvm_mips_ktime_to_count(vcpu, before);

	/*
	 * Detect significantly negative drift, where count is lower than
	 * expected. Some negative drift is expected when hardware counter is
	 * set after kvm_mips_freeze_hrtimer(), and it is harmless to allow the
	 * time to jump forwards a little, within reason. If the drift is too
	 * significant, adjust the bias to avoid a big Guest.CP0_Count jump.
	 */
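	/*
	 * E.g. with min_drift = -16: a count 10 ticks below before_count is
	 * tolerated, while one 20 ticks below corrects count_bias by -20.
	 */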
	drift = count - before_count;
	if (drift < min_drift) {
		count_time = before;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Calculate expected count right now */
	now = ktime_get();
	now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);

	/*
	 * Detect positive drift, where count is higher than expected, and
	 * adjust the bias to avoid guest time going backwards.
	 */
	drift = count - now_count;
	if (drift > 0) {
		count_time = now;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Subtract nanosecond delta to find ktime when count was read */
	delta = (u64)(u32)(now_count - count);
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	count_time = ktime_sub_ns(now, delta);

resume:
	/* Resume using the calculated ktime */
	kvm_mips_resume_hrtimer(vcpu, count_time, count);
	return ret;
}

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of timer.
 *
 * Initialise the timer to the specified frequency, zero it, and set it going if
 * it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
{
	vcpu->arch.count_hz = count_hz;
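	/*
	 * count_period is the time for the 32-bit Count to wrap:
	 * 2^32 / count_hz seconds, expressed in nanoseconds.
	 */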
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) vcpu->arch.count_dyn_bias = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) /* Starting at 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) kvm_mips_write_count(vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * kvm_mips_set_count_hz() - Update the frequency of the timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * @count_hz: Frequency of CP0_Count timer in Hz.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) * Change the frequency of the CP0_Count timer. This is done atomically so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) * CP0_Count is continuous and no timer interrupt is lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) * Returns: -EINVAL if @count_hz is out of range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) * 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) int dc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) ktime_t now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) /* ensure the frequency is in a sensible range... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) /* ... and has actually changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) if (vcpu->arch.count_hz == count_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) /* Safely freeze timer so we can keep it continuous */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) dc = kvm_mips_count_disabled(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (dc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) now = kvm_mips_count_time(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) count = kvm_read_c0_guest_count(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) now = kvm_mips_freeze_hrtimer(vcpu, &count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) /* Update the frequency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) vcpu->arch.count_hz = count_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) vcpu->arch.count_dyn_bias = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) /* Calculate adjusted bias so dynamic count is unchanged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) /* Update and resume hrtimer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (!dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) kvm_mips_resume_hrtimer(vcpu, now, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * kvm_mips_write_compare() - Modify compare and update timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) * @compare: New CP0_Compare value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) * @ack: Whether to acknowledge timer interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) * Update CP0_Compare to a new value and update the timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) * any pending timer interrupt is preserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) int dc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) u32 old_compare = kvm_read_c0_guest_compare(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) s32 delta = compare - old_compare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) u32 cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) /* if unchanged, must just be an ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (old_compare == compare) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (!ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) kvm_mips_callbacks->dequeue_timer_int(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) kvm_write_c0_guest_compare(cop0, compare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * too to prevent guest CP0_Count hitting guest CP0_Compare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * The new GTOffset corresponds to the new value of CP0_Compare, and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * set prior to it being written into the guest context. We disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * preemption until the new value is written to prevent restore of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * GTOffset corresponding to the old CP0_Compare value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) write_c0_gtoffset(compare - read_c0_count());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) back_to_back_c0_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) /* freeze_hrtimer() takes care of timer interrupts <= count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) dc = kvm_mips_count_disabled(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (!dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) now = kvm_mips_freeze_hrtimer(vcpu, &count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) kvm_mips_callbacks->dequeue_timer_int(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * preserve guest CP0_Cause.TI if we don't want to ack it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) cause = kvm_read_c0_guest_cause(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) kvm_write_c0_guest_compare(cop0, compare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (delta > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) back_to_back_c0_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (!ack && cause & CAUSEF_TI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) kvm_write_c0_guest_cause(cop0, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) /* resume_hrtimer() takes care of timer interrupts > count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (!dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) kvm_mips_resume_hrtimer(vcpu, now, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * until after the new CP0_Compare is written, otherwise new guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * CP0_Count could hit new guest CP0_Compare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) write_c0_gtoffset(compare - read_c0_count());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * kvm_mips_count_disable() - Disable count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * Disable the CP0_Count timer. A timer interrupt on or before the final stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * time will be handled but not after.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * count_ctl.DC has been set (count disabled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * Returns: The time that the timer was stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) ktime_t now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /* Stop hrtimer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) hrtimer_cancel(&vcpu->arch.comparecount_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* Set the static count from the dynamic count, handling pending TI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) now = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) count = kvm_mips_read_count_running(vcpu, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) kvm_write_c0_guest_count(cop0, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
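
/*
 * Usage sketch (editor's addition, mirroring kvm_mips_set_count_ctl()
 * below): callers that freeze the timer keep the returned stop time so
 * the count can be resumed later without guest-visible drift:
 *
 *	vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
 */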
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * Disable the CP0_Count timer and set CP0_Cause.DC. Unless the timer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * already disabled by count_ctl.DC, a timer interrupt on or before the final
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * stop time will still be handled; interrupts after it will not be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * Assumes CP0_Cause.DC is clear (count enabled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) kvm_mips_count_disable(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * the start time will be handled if the timer isn't disabled by count_ctl.DC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * potentially before even returning, so the caller should be careful with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * ordering of CP0_Cause modifications so as not to lose it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * Assumes CP0_Cause.DC is set (count disabled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * Set the dynamic count to match the static count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * This starts the hrtimer if count_ctl.DC allows it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * Otherwise it conveniently updates the biases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) count = kvm_read_c0_guest_count(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) kvm_mips_write_count(vcpu, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * kvm_mips_set_count_ctl() - Update the count control KVM register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * @count_ctl: Count control register new value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * Set the count control KVM register. The timer is updated accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * Returns: -EINVAL if reserved bits are set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) s64 changed = count_ctl ^ vcpu->arch.count_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) s64 delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ktime_t expire, now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) u32 count, compare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* Only allow defined bits to be changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* Apply new value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) vcpu->arch.count_ctl = count_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /* Master CP0_Count disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /* Is CP0_Cause.DC already disabling CP0_Count? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /* Just record the current time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) vcpu->arch.count_resume = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* disable timer and record current time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * Calculate timeout relative to static count at resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * time (wrap 0 to 2^32).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) count = kvm_read_c0_guest_count(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) compare = kvm_read_c0_guest_compare(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) delta = (u64)(u32)(compare - count - 1) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) delta = div_u64(delta * NSEC_PER_SEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) vcpu->arch.count_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) expire = ktime_add_ns(vcpu->arch.count_resume, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /* Handle pending interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) now = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (ktime_compare(now, expire) >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /* Nothing should be waiting on the timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) kvm_mips_callbacks->queue_timer_int(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /* Resume hrtimer without changing bias */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) count = kvm_mips_read_count_running(vcpu, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) kvm_mips_resume_hrtimer(vcpu, now, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
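
/*
 * Worked example (editor's addition) of the timeout arithmetic above,
 * assuming count_hz = 100000000 (a 100 MHz guest timer):
 *
 *	count   = 0xfffffff0, compare = 0x00000010
 *	delta   = (u64)(u32)(compare - count - 1) + 1 = 0x20 ticks
 *	ns      = 0x20 * NSEC_PER_SEC / 100000000     = 320 ns
 *
 * The (u32) truncation is what implements the "wrap 0 to 2^32" rule: a
 * Compare that has already wrapped past Count still yields a small
 * positive tick delta rather than a huge negative one.
 */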
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * kvm_mips_set_count_resume() - Update the count resume KVM register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * @count_resume: Count resume register new value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * Set the count resume KVM register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * Returns: -EINVAL if out of valid range (0..now).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * It doesn't make sense for the resume time to be in the future, as it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * would be possible for the next interrupt to be more than a full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * period in the future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) vcpu->arch.count_resume = ns_to_ktime(count_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
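
/*
 * Editor's note: userspace reaches this helper through the
 * KVM_SET_ONE_REG ioctl with the KVM_REG_MIPS_COUNT_RESUME id. The
 * value is a monotonic-clock timestamp in nanoseconds, which is why
 * anything later than ktime_get() is rejected above.
 */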
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * kvm_mips_count_timeout() - Push timer forward on timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * Handle an hrtimer event by pushing the hrtimer forward one period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* Add the Count period to the current expiry time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) vcpu->arch.count_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return HRTIMER_RESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
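
/*
 * Wiring sketch (editor's addition; names are illustrative, not this
 * file's own): the hrtimer expiry handler registered elsewhere is
 * expected to queue the timer interrupt and then delegate re-arming:
 *
 *	static enum hrtimer_restart expiry(struct hrtimer *timer)
 *	{
 *		struct kvm_vcpu *vcpu = container_of(timer,
 *				struct kvm_vcpu, arch.comparecount_timer);
 *
 *		kvm_mips_callbacks->queue_timer_int(vcpu);
 *		return kvm_mips_count_timeout(vcpu);
 *	}
 */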
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) kvm_clear_c0_guest_status(cop0, ST0_ERL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) kvm_read_c0_guest_epc(cop0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) kvm_clear_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) vcpu->arch.pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
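
/*
 * Editor's note: the ERL-before-EXL order above matches the architected
 * ERET priority. For example, a guest exception sets Status.EXL and
 * latches the faulting PC in EPC; the emulated ERET clears EXL and
 * resumes at EPC, while an error-level return (Status.ERL set) uses
 * ErrorEPC instead.
 */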
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) vcpu->arch.pending_exceptions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) ++vcpu->stat.wait_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (!vcpu->arch.pending_exceptions) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) kvm_vz_lose_htimer(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) vcpu->arch.wait = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) kvm_vcpu_block(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * If we are runnable, then definitely go off to user space to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * check if any I/O interrupts are pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) kvm_clear_request(KVM_REQ_UNHALT, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
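
/*
 * Illustration (editor's addition): this path is what a typical guest
 * idle loop lands in, e.g.
 *
 *	idle:	wait
 *		b	idle
 *
 * With no exceptions pending, the vCPU thread blocks in the host until
 * an interrupt or a userspace kick makes it runnable again, at which
 * point KVM_EXIT_IRQ_WINDOW_OPEN lets userspace inject pending I/O
 * interrupts.
 */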
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) unsigned long entryhi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int cpu, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) u32 nasid = entryhi & KVM_ENTRYHI_ASID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if ((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) KVM_ENTRYHI_ASID, nasid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * Flush entries from the GVA page tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * Guest user page table will get flushed lazily on re-entry to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * guest user if the guest ASID actually changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * Regenerate/invalidate kernel MMU context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * The user MMU context will be regenerated lazily on re-entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * to guest user if the guest ASID actually changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) get_new_mmu_context(kern_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) for_each_possible_cpu(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (i != cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) set_cpu_context(i, kern_mm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) kvm_write_c0_guest_entryhi(cop0, entryhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
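
/*
 * Worked example (editor's addition): with an 8-bit ASID mask,
 * entryhi = 0x004000a5 yields nasid = 0xa5. Only when that differs
 * from the ASID currently in guest EntryHi do the GVA page tables and
 * the kernel-mode MMU context get invalidated above; rewriting the
 * same ASID is just a cheap register update.
 */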
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) struct kvm_mips_tlb *tlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) unsigned long pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) index = kvm_read_c0_guest_index(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* UNDEFINED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) tlb = &vcpu->arch.guest_tlb[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * @vcpu: VCPU with changed mappings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * @tlb: TLB entry being removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * This is called to indicate a single change in guest MMU mappings, so that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * can arrange TLB flushes on this and other CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) struct kvm_mips_tlb *tlb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) int cpu, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) bool user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* No need to flush for entries which are already invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* Don't touch host kernel page tables or TLB mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /* User address space doesn't need flushing for KSeg2/3 changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) user = tlb->tlb_hi < KVM_GUEST_KSEG0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /* Invalidate page table entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * Probe the shadow host TLB for the entry being overwritten, if one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * matches, invalidate it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /* Invalidate the whole ASID on other CPUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (i == cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) set_cpu_context(i, user_mm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) set_cpu_context(i, kern_mm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
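
/*
 * Range recap (editor's addition): entries mapping guest VA at or
 * above 0x80000000 are host-managed and skipped entirely; entries
 * below KVM_GUEST_KSEG0 also live in the guest-user address space, so
 * both the user and kernel GVA contexts must be invalidated for them.
 */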
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /* Write Guest TLB Entry @ Index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) int index = kvm_read_c0_guest_index(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) struct kvm_mips_tlb *tlb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) unsigned long pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) kvm_debug("%s: illegal index: %d\n", __func__, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) pc, index, kvm_read_c0_guest_entryhi(cop0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) kvm_read_c0_guest_entrylo0(cop0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) kvm_read_c0_guest_entrylo1(cop0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) kvm_read_c0_guest_pagemask(cop0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) tlb = &vcpu->arch.guest_tlb[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) kvm_mips_invalidate_guest_tlb(vcpu, tlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) pc, index, kvm_read_c0_guest_entryhi(cop0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) kvm_read_c0_guest_entrylo0(cop0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) kvm_read_c0_guest_entrylo1(cop0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) kvm_read_c0_guest_pagemask(cop0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
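
/*
 * Worked example (editor's addition) of the index sanitisation above:
 * a bogus guest Index of 0x80000005 first has the probe-failure bit
 * cleared (0x00000005) and is then reduced modulo
 * KVM_MIPS_GUEST_TLB_SIZE, selecting entry 5 of a 64-entry guest TLB.
 */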
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /* Write Guest TLB Entry @ Random Index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct kvm_mips_tlb *tlb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) unsigned long pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) tlb = &vcpu->arch.guest_tlb[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) kvm_mips_invalidate_guest_tlb(vcpu, tlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) pc, index, kvm_read_c0_guest_entryhi(cop0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) kvm_read_c0_guest_entrylo0(cop0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) kvm_read_c0_guest_entrylo1(cop0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
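
/*
 * Editor's note: real hardware draws the slot from CP0_Random; the
 * emulation simply picks any slot in [0, KVM_MIPS_GUEST_TLB_SIZE) at
 * random, which is sufficient for guests that treat TLBWR placement as
 * unpredictable.
 */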
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) long entryhi = kvm_read_c0_guest_entryhi(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) unsigned long pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) int index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) kvm_write_c0_guest_index(cop0, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
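
/*
 * Editor's note: a failed probe returns -1, and writing -1 to the
 * guest Index register sets bit 31 - the architected P (probe failure)
 * flag - as a side effect, so the not-found case needs no special
 * handling here.
 */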
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * Finds the mask of bits which are writable in the guest's Config1 CP0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * register, by userland (currently read-only to the guest).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) unsigned int mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) /* Permit FPU to be present if FPU is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) mask |= MIPS_CONF1_FP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * Finds the mask of bits which are writable in the guest's Config3 CP0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * register, by userland (currently read-only to the guest).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /* Config4 and ULRI are optional */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /* Permit MSA to be present if MSA is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (kvm_mips_guest_can_have_msa(&vcpu->arch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) mask |= MIPS_CONF3_MSA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * Finds the mask of bits which are writable in the guest's Config4 CP0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * register, by userland (currently read-only to the guest).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /* Config5 is optional */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) unsigned int mask = MIPS_CONF_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) /* KScrExist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
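
/*
 * Editor's note: 0xfc sets the KScrExist bits for KScratch2..KScratch7,
 * i.e. userland may advertise up to six scratch registers to the guest,
 * while the low two KScrExist bits stay read-only.
 */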
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * @vcpu: Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * Finds the mask of bits which are writable in the guest's Config5 CP0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * register, by the guest itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) unsigned int mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* Permit MSAEn changes if MSA supported and enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (kvm_mips_guest_has_msa(&vcpu->arch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) mask |= MIPS_CONF5_MSAEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * Permit guest FPU mode changes if FPU is enabled and the relevant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * feature exists according to FIR register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (cpu_has_fre)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) mask |= MIPS_CONF5_FRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /* We don't support UFR or UFE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
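
/*
 * Application sketch (editor's addition, mirroring the Config5 handling
 * in kvm_mips_emulate_CP0() below) - only bits within the mask may
 * change, everything else is silently preserved:
 *
 *	wrmask = kvm_mips_config5_wrmask(vcpu);
 *	change = (val ^ old_val) & wrmask;
 *	val    = old_val ^ change;
 *
 * e.g. old_val = 0, an attempted val of ~0 and wrmask = MIPS_CONF5_FRE
 * leaves every bit clear except FRE.
 */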
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) u32 *opc, u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) u32 rt, rd, sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) unsigned long curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * Update PC and hold onto current PC in case there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * an error and we want to rollback the PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) curr_pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) er = update_pc(vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (inst.co_format.co) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) switch (inst.co_format.func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) case tlbr_op: /* Read indexed TLB entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) er = kvm_mips_emul_tlbr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) case tlbwi_op: /* Write indexed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) er = kvm_mips_emul_tlbwi(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) case tlbwr_op: /* Write random */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) er = kvm_mips_emul_tlbwr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) case tlbp_op: /* TLB Probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) er = kvm_mips_emul_tlbp(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) case rfe_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) kvm_err("!!!COP0_RFE!!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) case eret_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) er = kvm_mips_emul_eret(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) goto dont_update_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) case wait_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) er = kvm_mips_emul_wait(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) case hypcall_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) er = kvm_mips_emul_hypcall(vcpu, inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) rt = inst.c0r_format.rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) rd = inst.c0r_format.rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) sel = inst.c0r_format.sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) switch (inst.c0r_format.rs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) case mfc_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) cop0->stat[rd][sel]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /* Get reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) vcpu->arch.gprs[rt] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) (s32)kvm_mips_read_count(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) vcpu->arch.gprs[rt] = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) #ifdef CONFIG_KVM_MIPS_DYN_TRANS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) kvm_mips_trans_mfc0(inst, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) #ifdef CONFIG_KVM_MIPS_DYN_TRANS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) kvm_mips_trans_mfc0(inst, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) KVM_TRACE_COP0(rd, sel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) vcpu->arch.gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) case dmfc_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) KVM_TRACE_COP0(rd, sel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) vcpu->arch.gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) case mtc_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) cop0->stat[rd][sel]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) KVM_TRACE_COP0(rd, sel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) vcpu->arch.gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if ((rd == MIPS_CP0_TLB_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) && (vcpu->arch.gprs[rt] >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) KVM_MIPS_GUEST_TLB_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) kvm_err("Invalid TLB Index: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) vcpu->arch.gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * Preserve core number, and keep the exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * base in guest KSeg0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) vcpu->arch.gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) kvm_mips_change_entryhi(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) vcpu->arch.gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /* Are we writing to COUNT? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) /* Writing to COMPARE: clear any pending timer interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) kvm_mips_write_compare(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) vcpu->arch.gprs[rt],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) unsigned int old_val, val, change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) old_val = kvm_read_c0_guest_status(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) val = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) change = val ^ old_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /* Make sure that the NMI bit is never set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) val &= ~ST0_NMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * Don't allow CU1 or FR to be set unless FPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * capability enabled and exists in guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!kvm_mips_guest_has_fpu(&vcpu->arch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) val &= ~(ST0_CU1 | ST0_FR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * Also don't allow FR to be set if host doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * support it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) val &= ~ST0_FR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /* Handle changes in FPU mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * FPU and Vector register state is made
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * UNPREDICTABLE by a change of FR, so don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * even bother saving it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (change & ST0_FR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) kvm_drop_fpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * If MSA state is already live, it is undefined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * how it interacts with FR=0 FPU state, and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) * don't want to hit reserved instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * exceptions trying to save the MSA state later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) * when CU=1 && FR=1, so play it safe and save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) * it first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (change & ST0_CU1 && !(val & ST0_FR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) kvm_lose_fpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * Propagate CU1 (FPU enable) changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * immediately if the FPU context is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * loaded. When disabling we leave the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * loaded so it can be quickly enabled again in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * the near future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (change & ST0_CU1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) change_c0_status(ST0_CU1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) kvm_write_c0_guest_status(cop0, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) #ifdef CONFIG_KVM_MIPS_DYN_TRANS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * If FPU present, we need CU1/FR bits to take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * effect fairly soon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (!kvm_mips_guest_has_fpu(&vcpu->arch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) kvm_mips_trans_mtc0(inst, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) unsigned int old_val, val, change, wrmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) old_val = kvm_read_c0_guest_config5(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) val = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* Only a few bits are writable in Config5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) wrmask = kvm_mips_config5_wrmask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) change = (val ^ old_val) & wrmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) val = old_val ^ change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) /* Handle changes in FPU/MSA modes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * Propagate FRE changes immediately if the FPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * context is already loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (change & MIPS_CONF5_FRE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) change_c0_config5(MIPS_CONF5_FRE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * Propagate MSAEn changes immediately if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) * MSA context is already loaded. When disabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * we leave the context loaded so it can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * quickly enabled again in the near future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (change & MIPS_CONF5_MSAEN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) change_c0_config5(MIPS_CONF5_MSAEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) kvm_write_c0_guest_config5(cop0, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) u32 old_cause, new_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) old_cause = kvm_read_c0_guest_cause(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) new_cause = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /* Update R/W bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) kvm_change_c0_guest_cause(cop0, 0x08800300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) new_cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /* DC bit enabling/disabling timer? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if ((old_cause ^ new_cause) & CAUSEF_DC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (new_cause & CAUSEF_DC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) kvm_mips_count_disable_cause(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) kvm_mips_count_enable_cause(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) u32 mask = MIPS_HWRENA_CPUNUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) MIPS_HWRENA_SYNCISTEP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) MIPS_HWRENA_CC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) MIPS_HWRENA_CCRES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (kvm_read_c0_guest_config3(cop0) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) MIPS_CONF3_ULRI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) mask |= MIPS_HWRENA_ULR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) #ifdef CONFIG_KVM_MIPS_DYN_TRANS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) kvm_mips_trans_mtc0(inst, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) case dmtc_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) vcpu->arch.pc, rt, rd, sel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) KVM_TRACE_COP0(rd, sel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) vcpu->arch.gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) case mfmc0_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) cop0->stat[MIPS_CP0_STATUS][0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (rt != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) vcpu->arch.gprs[rt] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) kvm_read_c0_guest_status(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) /* EI when the sc bit is set, DI when it is clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (inst.mfmc0_format.sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) kvm_debug("[%#lx] mfmc0_op: EI\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) vcpu->arch.pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) kvm_set_c0_guest_status(cop0, ST0_IE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) kvm_debug("[%#lx] mfmc0_op: DI\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) vcpu->arch.pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) kvm_clear_c0_guest_status(cop0, ST0_IE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) case wrpgpr_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) u32 pss =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) * We don't support any shadow register sets, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) * SRSCtl[PSS] == SRSCtl[CSS] == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (css || pss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) vcpu->arch.gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) vcpu->arch.pc, inst.c0r_format.rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) /* Rollback PC only if emulation was unsuccessful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) vcpu->arch.pc = curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) dont_update_pc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * This label is for special instructions whose emulation already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * updates the PC, so the PC must not be overwritten under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * any circumstances
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) enum emulation_result er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) u32 rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) void *data = run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) unsigned int imme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) unsigned long curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * Update PC and hold onto current PC in case there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * an error and we want to rollback the PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) curr_pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) er = update_pc(vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) rt = inst.i_format.rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) vcpu->arch.host_cp0_badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (run->mmio.phys_addr == KVM_INVALID_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) switch (inst.i_format.opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) case sd_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) run->mmio.len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) *(u64 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) vcpu->arch.gprs[rt], *(u64 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) case sw_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) run->mmio.len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) *(u32 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) vcpu->arch.gprs[rt], *(u32 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) case sh_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) run->mmio.len = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) *(u16 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) vcpu->arch.gprs[rt], *(u16 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) case sb_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) run->mmio.len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) *(u8 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) vcpu->arch.gprs[rt], *(u8 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
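/*
* SWL merges the most-significant bytes of rt into the word-aligned
* MMIO buffer; the byte lanes below follow the little-endian layout,
* so offset 3 stores the whole word. Worked example (illustrative,
* little-endian guest assumed):
*
*   rt = 0x11223344, badvaddr offset = 1:
*   *(u32 *)data = (old & 0xffff0000) | 0x1122
*/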
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) case swl_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) vcpu->arch.host_cp0_badvaddr) & (~0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) run->mmio.len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) imme = vcpu->arch.host_cp0_badvaddr & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) switch (imme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) *(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) (vcpu->arch.gprs[rt] >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) *(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) (vcpu->arch.gprs[rt] >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) *(u32 *)data = ((*(u32 *)data) & 0xff000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) (vcpu->arch.gprs[rt] >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) *(u32 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) vcpu->arch.gprs[rt], *(u32 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
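/*
* SWR is the mirror image: the least-significant bytes of rt are
* merged in, so offset 0 stores the whole word. Worked example
* (illustrative, little-endian guest assumed):
*
*   rt = 0x11223344, badvaddr offset = 3:
*   *(u32 *)data = (old & 0x00ffffff) | 0x44000000
*/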
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) case swr_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) vcpu->arch.host_cp0_badvaddr) & (~0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) run->mmio.len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) imme = vcpu->arch.host_cp0_badvaddr & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) switch (imme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) *(u32 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) *(u32 *)data = ((*(u32 *)data) & 0xff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) (vcpu->arch.gprs[rt] << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) *(u32 *)data = ((*(u32 *)data) & 0xffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) (vcpu->arch.gprs[rt] << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) *(u32 *)data = ((*(u32 *)data) & 0xffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) (vcpu->arch.gprs[rt] << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) vcpu->arch.gprs[rt], *(u32 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
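/*
* SDL/SDR below are the 64-bit analogues of SWL/SWR: the offset
* within the doubleword (0..7) selects how many of rt's most-
* (SDL) or least- (SDR) significant bytes are merged into the
* aligned 8-byte MMIO buffer.
*/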
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) case sdl_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) vcpu->arch.host_cp0_badvaddr) & (~0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) run->mmio.len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) imme = vcpu->arch.host_cp0_badvaddr & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) switch (imme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) ((vcpu->arch.gprs[rt] >> 56) & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) ((vcpu->arch.gprs[rt] >> 48) & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) *(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) ((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) *(u64 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) vcpu->arch.gprs[rt], *(u64 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) case sdr_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) vcpu->arch.host_cp0_badvaddr) & (~0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) run->mmio.len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) imme = vcpu->arch.host_cp0_badvaddr & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) switch (imme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) *(u64 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) *(u64 *)data = ((*(u64 *)data) & 0xff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) (vcpu->arch.gprs[rt] << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) *(u64 *)data = ((*(u64 *)data) & 0xffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) (vcpu->arch.gprs[rt] << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) *(u64 *)data = ((*(u64 *)data) & 0xffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) (vcpu->arch.gprs[rt] << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) *(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) (vcpu->arch.gprs[rt] << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) (vcpu->arch.gprs[rt] << 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) (vcpu->arch.gprs[rt] << 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) (vcpu->arch.gprs[rt] << 56);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) vcpu->arch.gprs[rt], *(u64 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) #ifdef CONFIG_CPU_LOONGSON64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) case sdc2_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) rt = inst.loongson3_lsdc2_format.rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) switch (inst.loongson3_lsdc2_format.opcode1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) * Loongson-3 overridden sdc2 instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) * opcode1 instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * 0x0 gssbx: store 1 byte from GPR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) * 0x1 gsshx: store 2 bytes from GPR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) * 0x2 gsswx: store 4 bytes from GPR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * 0x3 gssdx: store 8 bytes from GPR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) case 0x0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) run->mmio.len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) *(u8 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) vcpu->arch.gprs[rt], *(u8 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) case 0x1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) run->mmio.len = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) *(u16 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) vcpu->arch.gprs[rt], *(u16 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) case 0x2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) run->mmio.len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) *(u32 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) vcpu->arch.gprs[rt], *(u32 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) case 0x3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) run->mmio.len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) *(u64 *)data = vcpu->arch.gprs[rt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) vcpu->arch.gprs[rt], *(u64 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) kvm_err("Store not yet supported (inst=0x%08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) vcpu->mmio_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) run->mmio.is_write = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) vcpu->mmio_is_write = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
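/*
* Try to satisfy the store from an in-kernel device on the MMIO bus
* first; kvm_io_bus_write() returns 0 when a registered device claims
* the access, so no exit to userspace is needed. Otherwise return
* EMULATE_DO_MMIO and let userspace complete the access.
*/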
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) run->mmio.phys_addr, run->mmio.len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if (!r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) vcpu->mmio_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return EMULATE_DO_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) out_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) /* Rollback PC if emulation was unsuccessful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) vcpu->arch.pc = curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) u32 cause, struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) enum emulation_result er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) unsigned long curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) u32 op, rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) unsigned int imme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) rt = inst.i_format.rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) op = inst.i_format.opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) * Find the resume PC now while we have safe and easy access to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) * prior branch instruction, and save it for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * kvm_mips_complete_mmio_load() to restore later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) curr_pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) er = update_pc(vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) vcpu->arch.io_pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) vcpu->arch.pc = curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) vcpu->arch.io_gpr = rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) vcpu->arch.host_cp0_badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (run->mmio.phys_addr == KVM_INVALID_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
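/*
* vcpu->mmio_needed tells kvm_mips_complete_mmio_load() how to fold
* the returned data into the destination GPR. The values assigned
* below encode:
*
*   1       plain unsigned load
*   2       plain sign-extended load
*   3..6    lwl merging 1..4 bytes
*   7..10   lwr merging 4..1 bytes
*   11..18  ldl merging 1..8 bytes
*   19..26  ldr merging 8..1 bytes
*   27..30  Loongson gslbx/gslhx/gslwx/gsldx (signed)
*/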
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) vcpu->mmio_needed = 2; /* signed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) switch (op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) case ld_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) run->mmio.len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) case lwu_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) vcpu->mmio_needed = 1; /* unsigned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) case lw_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) run->mmio.len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) case lhu_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) vcpu->mmio_needed = 1; /* unsigned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) case lh_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) run->mmio.len = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) case lbu_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) vcpu->mmio_needed = 1; /* unsigned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) case lb_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) run->mmio.len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) case lwl_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) vcpu->arch.host_cp0_badvaddr) & (~0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) run->mmio.len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) imme = vcpu->arch.host_cp0_badvaddr & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) switch (imme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) vcpu->mmio_needed = 3; /* 1 byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) vcpu->mmio_needed = 4; /* 2 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) vcpu->mmio_needed = 5; /* 3 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) vcpu->mmio_needed = 6; /* 4 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) case lwr_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) vcpu->arch.host_cp0_badvaddr) & (~0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) run->mmio.len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) imme = vcpu->arch.host_cp0_badvaddr & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) switch (imme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) vcpu->mmio_needed = 7; /* 4 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) vcpu->mmio_needed = 8; /* 3 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) vcpu->mmio_needed = 9; /* 2 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) vcpu->mmio_needed = 10; /* 1 byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) case ldl_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) vcpu->arch.host_cp0_badvaddr) & (~0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) run->mmio.len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) imme = vcpu->arch.host_cp0_badvaddr & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) switch (imme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) vcpu->mmio_needed = 11; /* 1 byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) vcpu->mmio_needed = 12; /* 2 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) vcpu->mmio_needed = 13; /* 3 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) vcpu->mmio_needed = 14; /* 4 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) vcpu->mmio_needed = 15; /* 5 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) vcpu->mmio_needed = 16; /* 6 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) vcpu->mmio_needed = 17; /* 7 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) vcpu->mmio_needed = 18; /* 8 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) case ldr_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) vcpu->arch.host_cp0_badvaddr) & (~0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) run->mmio.len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) imme = vcpu->arch.host_cp0_badvaddr & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) switch (imme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) vcpu->mmio_needed = 19; /* 8 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) vcpu->mmio_needed = 20; /* 7 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) vcpu->mmio_needed = 21; /* 6 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) vcpu->mmio_needed = 22; /* 5 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) vcpu->mmio_needed = 23; /* 4 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) vcpu->mmio_needed = 24; /* 3 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) vcpu->mmio_needed = 25; /* 2 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) vcpu->mmio_needed = 26; /* 1 byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) #ifdef CONFIG_CPU_LOONGSON64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) case ldc2_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) rt = inst.loongson3_lsdc2_format.rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) switch (inst.loongson3_lsdc2_format.opcode1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) * Loongson-3 overridden ldc2 instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) * opcode1 instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) * 0x0 gslbx: load 1 byte into GPR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) * 0x1 gslhx: load 2 bytes into GPR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) * 0x2 gslwx: load 4 bytes into GPR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) * 0x3 gsldx: load 8 bytes into GPR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) case 0x0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) run->mmio.len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) vcpu->mmio_needed = 27; /* signed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) case 0x1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) run->mmio.len = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) vcpu->mmio_needed = 28; /* signed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) case 0x2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) run->mmio.len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) vcpu->mmio_needed = 29; /* signed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) case 0x3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) run->mmio.len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) vcpu->mmio_needed = 30; /* signed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) kvm_err("Load not yet supported (inst=0x%08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) vcpu->mmio_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) run->mmio.is_write = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) vcpu->mmio_is_write = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
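/*
* As with stores, try the in-kernel MMIO bus first; if a device
* handles the read, the result can be folded into the GPR right away
* via kvm_mips_complete_mmio_load() without a userspace exit.
*/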
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) run->mmio.phys_addr, run->mmio.len, run->mmio.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (!r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) kvm_mips_complete_mmio_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) vcpu->mmio_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) return EMULATE_DO_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) #ifndef CONFIG_KVM_MIPS_VZ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) unsigned long curr_pc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) u32 cause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
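/*
* The cache op may fault on the guest virtual address, e.g. if we
* race with a GVA invalidation. Attempt it inside the lockless GVA
* access section, and on a fault either fix up the mapping and
* retry, or deliver a TLB exception to the guest.
*/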
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) /* Carefully attempt the cache operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) kvm_trap_emul_gva_lockless_begin(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) err = fn(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) kvm_trap_emul_gva_lockless_end(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (likely(!err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * Try to handle the fault and retry; we may simply have raced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * with a GVA invalidation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) case KVM_MIPS_GVA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) case KVM_MIPS_GPA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) /* bad virtual or physical address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) case KVM_MIPS_TLB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) /* no matching guest TLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) vcpu->arch.host_cp0_badvaddr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) vcpu->arch.pc = curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) return EMULATE_EXCEPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) case KVM_MIPS_TLBINV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) /* invalid matching guest TLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) vcpu->arch.host_cp0_badvaddr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) vcpu->arch.pc = curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) return EMULATE_EXCEPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) u32 *opc, u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) u32 cache, op_inst, op, base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) s16 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) unsigned long va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) unsigned long curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) * Update PC and hold onto current PC in case there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) * an error and we want to rollback the PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) curr_pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) er = update_pc(vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
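/*
* Decode the CACHE instruction: rs names the base register, rt
* carries the packed cache/op fields, and the offset is a signed
* 16-bit immediate (9-bit on MIPS R6). For example, with the usual
* asm/cacheops.h encoding, op_inst == 0x15 is Hit_Writeback_Inv_D:
* op 0b101 (hit writeback invalidate) on cache 0b01 (primary D).
*/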
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) base = inst.i_format.rs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) op_inst = inst.i_format.rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) if (cpu_has_mips_r6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) offset = inst.spec3_format.simmediate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) offset = inst.i_format.simmediate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) cache = op_inst & CacheOp_Cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) op = op_inst & CacheOp_Op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) va = arch->gprs[base] + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) cache, op, base, arch->gprs[base], offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * Treat INDEX_INV as a no-op; Linux issues it on startup to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) * invalidate the caches entirely by stepping through all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) * ways/indexes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (op == Index_Writeback_Inv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) arch->gprs[base], offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (cache == Cache_D) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) #ifdef CONFIG_CPU_R4K_CACHE_TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) r4k_blast_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) switch (boot_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) case CPU_CAVIUM_OCTEON3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) /* locally flush icache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) local_flush_icache_range(0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) __flush_cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) } else if (cache == Cache_I) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) #ifdef CONFIG_CPU_R4K_CACHE_TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) r4k_blast_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) switch (boot_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) case CPU_CAVIUM_OCTEON3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) /* locally flush icache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) local_flush_icache_range(0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) flush_icache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) kvm_err("%s: unsupported CACHE INDEX operation\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) #ifdef CONFIG_KVM_MIPS_DYN_TRANS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) kvm_mips_trans_cache_index(inst, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) /* XXXKYMA: Only the subset of cache ops used by Linux is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * Perform the dcache part of icache synchronisation on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * guest's behalf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) curr_pc, va, vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (er != EMULATE_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) #ifdef CONFIG_KVM_MIPS_DYN_TRANS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) * Replace the CACHE instruction with a SYNCI; not equivalent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) * but it avoids a trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) kvm_mips_trans_cache_va(inst, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) } else if (op_inst == Hit_Invalidate_I) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) /* Perform the icache synchronisation on the guest's behalf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) curr_pc, va, vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (er != EMULATE_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) er = kvm_mips_guest_cache_op(protected_flush_icache_line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) curr_pc, va, vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (er != EMULATE_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) #ifdef CONFIG_KVM_MIPS_DYN_TRANS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) /* Replace the CACHE instruction, with a SYNCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) kvm_mips_trans_cache_va(inst, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) cache, op, base, arch->gprs[base], offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) /* Rollback PC only if emulation was unsuccessful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) vcpu->arch.pc = curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) /* Guest exception needs guest to resume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) if (er == EMULATE_EXCEPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) union mips_instruction inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) /* Fetch the instruction (from EPC + 4 if in a branch delay slot). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) opc += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) err = kvm_get_badinstr(opc, vcpu, &inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) switch (inst.r_format.opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) case cop0_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) #ifndef CONFIG_CPU_MIPSR6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) case cache_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) ++vcpu->stat.cache_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) er = kvm_mips_emulate_cache(inst, opc, cause, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) case spec3_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) switch (inst.spec3_format.func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) case cache6_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) ++vcpu->stat.cache_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) er = kvm_mips_emulate_cache(inst, opc, cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) goto unknown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) unknown:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) kvm_arch_vcpu_dump_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) #endif /* CONFIG_KVM_MIPS_VZ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) * kvm_mips_guest_exception_base() - Find guest exception vector base address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) * @vcpu:	Virtual CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) * Returns: The base address of the current guest exception vector, taking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) if (kvm_read_c0_guest_status(cop0) & ST0_BEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) return KVM_GUEST_CKSEG1ADDR(0x1fc00200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
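/*
* The exception injection helpers below follow the architectural
* pattern for delivering a precise exception to the guest: save the
* faulting PC in Guest.CP0_EPC, set Status.EXL, mirror the
* branch-delay flag into Cause.BD, set Cause.ExcCode, then resume the
* guest at its exception vector (base + 0x180 for general exceptions,
* base + 0x0 for a TLB refill taken with EXL clear).
*/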
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) enum emulation_result kvm_mips_emulate_syscall(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) (EXCCODE_SYS << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) /* Set PC to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
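
/*
 * Note: every delivery routine below follows the same pattern as
 * kvm_mips_emulate_syscall() above; roughly (a condensed sketch, not
 * literal code from this file):
 *
 *	if (!(Status & ST0_EXL)) {
 *		EPC = pc;
 *		Status |= ST0_EXL;
 *		Cause.BD = !!(cause & CAUSEF_BD);
 *	}
 *	Cause.ExcCode = exccode;
 *	pc = kvm_mips_guest_exception_base(vcpu) + offset;
 *
 * Only the ExcCode, the vector offset, and the CP0 state written for the
 * guest (BadVAddr, EntryHi, ...) differ per exception.
 */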
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) /* set pc to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) (EXCCODE_TLBL << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) /* setup badvaddr, context and entryhi registers for the guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) /* XXXKYMA: is the context register used by linux??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) kvm_write_c0_guest_entryhi(cop0, entryhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
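
/*
 * Worked example for the EntryHi value written above (hypothetical
 * addresses): with host_cp0_badvaddr = 0x00400123, 4K pages and a guest
 * ASID of 0x5:
 *
 *	(0x00400123 & VPN2_MASK) | 0x5 == 0x00400005
 *
 * i.e. the VPN2 of the faulting address paired with the current guest
 * ASID, ready for the guest's own refill handler to look up.
 */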
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) unsigned long entryhi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) /* set pc to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) (EXCCODE_TLBL << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) /* setup badvaddr, context and entryhi registers for the guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) /* XXXKYMA: is the context register used by linux??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) kvm_write_c0_guest_entryhi(cop0, entryhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) /* Set PC to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) (EXCCODE_TLBS << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) /* setup badvaddr, context and entryhi registers for the guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) /* XXXKYMA: is the context register used by linux??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) kvm_write_c0_guest_entryhi(cop0, entryhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) /* Set PC to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) (EXCCODE_TLBS << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) /* setup badvaddr, context and entryhi registers for the guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) /* XXXKYMA: is the context register used by linux??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) kvm_write_c0_guest_entryhi(cop0, entryhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) (EXCCODE_MOD << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) /* setup badvaddr, context and entryhi registers for the guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) /* XXXKYMA: is the context register used by linux??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) kvm_write_c0_guest_entryhi(cop0, entryhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) }
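
/*
 * A TLB modified exception is what the guest expects for a store through
 * a TLB entry that is valid but not dirty (D bit clear), so the guest OS
 * can mark the page dirty and retry the store.
 */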
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) (EXCCODE_CPU << CAUSEB_EXCCODE));
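	/* Report coprocessor 1 (the FPU) as the unusable coprocessor */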
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) (EXCCODE_RI << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) /* Set PC to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) kvm_err("Trying to deliver RI when EXL is already set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) (EXCCODE_BP << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) /* Set PC to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) kvm_err("Trying to deliver BP when EXL is already set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) (EXCCODE_TR << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) /* Set PC to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) kvm_err("Trying to deliver TRAP when EXL is already set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) /* Set PC to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) (EXCCODE_FPE << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) /* Set PC to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) kvm_err("Trying to deliver FPE when EXL is already set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) (EXCCODE_MSADIS << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) /* Set PC to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) kvm_err("Trying to deliver MSADIS when EXL is already set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) unsigned long curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) union mips_instruction inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) * Update PC and hold onto current PC in case there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 	 * an error and we want to roll back the PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) curr_pc = vcpu->arch.pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) er = update_pc(vcpu, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (er == EMULATE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) /* Fetch the instruction. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) opc += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) err = kvm_get_badinstr(opc, vcpu, &inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) if (inst.r_format.opcode == spec3_op &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) inst.r_format.func == rdhwr_op &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) inst.r_format.rs == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) (inst.r_format.re >> 3) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) int rd = inst.r_format.rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) int rt = inst.r_format.rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) int sel = inst.r_format.re & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) /* If usermode, check RDHWR rd is allowed by guest HWREna */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) rd, opc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) goto emulate_ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) switch (rd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) case MIPS_HWR_CPUNUM: /* CPU number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) arch->gprs[rt] = vcpu->vcpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) case MIPS_HWR_SYNCISTEP: /* SYNCI length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) current_cpu_data.icache.linesz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) case MIPS_HWR_CC: /* Read count register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) case MIPS_HWR_CCRES: /* Count register resolution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) switch (current_cpu_data.cputype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) case CPU_20KC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) case CPU_25KF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) arch->gprs[rt] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) arch->gprs[rt] = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) case MIPS_HWR_ULR: /* Read UserLocal register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) goto emulate_ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) vcpu->arch.gprs[rt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) kvm_debug("Emulate RI not supported @ %p: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) opc, inst.word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) goto emulate_ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) emulate_ri:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 	 * Roll back the PC (if in a branch delay slot, the PC already points
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	 * to the branch target), and pass the RI exception to the guest OS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) vcpu->arch.pc = curr_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) return kvm_mips_emulate_ri_exc(cause, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) }
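
/*
 * Usage example (hypothetical guest code): a guest thread-local access
 * such as
 *
 *	rdhwr	$3, $29		# rt = $3, rd = 29 (MIPS_HWR_ULR)
 *
 * traps as a Reserved Instruction exception when RDHWR is not directly
 * usable; the handler above copies the guest CP0_UserLocal value into
 * guest $3 and resumes, so the guest never sees the RI. Unsupported or
 * HWREna-disallowed cases fall through to kvm_mips_emulate_ri_exc().
 */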
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) if (run->mmio.len > sizeof(*gpr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 		kvm_err("Bad MMIO length: %d\n", run->mmio.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) /* Restore saved resume PC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) vcpu->arch.pc = vcpu->arch.io_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) switch (run->mmio.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) switch (vcpu->mmio_needed) {
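		/*
		 * Cases 11-17 merge the fetched bytes into the most
		 * significant end of the register (LDL-style partial
		 * loads), 18-19 replace the whole register, and 20-26
		 * merge into the least significant end (LDR-style).
		 */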
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) case 11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) (((*(s64 *)run->mmio.data) & 0xff) << 56);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) (((*(s64 *)run->mmio.data) & 0xffff) << 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) case 13:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) (((*(s64 *)run->mmio.data) & 0xffffff) << 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) case 14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) (((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) case 15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) case 17:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) case 18:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) case 19:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) *gpr = *(s64 *)run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) case 20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) case 21:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) case 22:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) case 23:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) case 24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) case 25:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) case 26:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) ((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) *gpr = *(s64 *)run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) switch (vcpu->mmio_needed) {
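		/*
		 * 32-bit analogue of the above: cases 1, 2, 6 and 7
		 * replace the register (zero- or sign-extending), 3-5
		 * merge into the high bytes (LWL-style) and 8-10 into
		 * the low bytes (LWR-style).
		 */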
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) *gpr = *(u32 *)run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) *gpr = *(s32 *)run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) (((*(s32 *)run->mmio.data) & 0xff) << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) (((*(s32 *)run->mmio.data) & 0xffff) << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) (((*(s32 *)run->mmio.data) & 0xffffff) << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) *gpr = *(s32 *)run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) case 9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) case 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) ((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) *gpr = *(s32 *)run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) if (vcpu->mmio_needed == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) *gpr = *(u16 *)run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) *gpr = *(s16 *)run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) if (vcpu->mmio_needed == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) *gpr = *(u8 *)run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) *gpr = *(s8 *)run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) static enum emulation_result kvm_mips_emulate_exc(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) struct mips_coproc *cop0 = vcpu->arch.cop0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) struct kvm_vcpu_arch *arch = &vcpu->arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) /* save old pc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) kvm_write_c0_guest_epc(cop0, arch->pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) kvm_set_c0_guest_status(cop0, ST0_EXL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if (cause & CAUSEF_BD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) kvm_change_c0_guest_cause(cop0, (0xff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) (exccode << CAUSEB_EXCCODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) /* Set PC to the exception entry point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) exccode, kvm_read_c0_guest_epc(cop0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) kvm_read_c0_guest_badvaddr(cop0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) kvm_err("Trying to deliver EXC when EXL is already set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) enum emulation_result kvm_mips_check_privilege(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) if (usermode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) switch (exccode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) case EXCCODE_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) case EXCCODE_SYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) case EXCCODE_BP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) case EXCCODE_RI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) case EXCCODE_TR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) case EXCCODE_MSAFPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) case EXCCODE_FPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) case EXCCODE_MSADIS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) case EXCCODE_CPU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) er = EMULATE_PRIV_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) case EXCCODE_MOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) case EXCCODE_TLBL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 			 * If we are accessing Guest kernel space, send an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 			 * address error exception to the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) kvm_debug("%s: LD MISS @ %#lx\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) cause &= ~0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) er = EMULATE_PRIV_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) case EXCCODE_TLBS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 			 * If we are accessing Guest kernel space, send an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 			 * address error exception to the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) kvm_debug("%s: ST MISS @ %#lx\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) cause &= ~0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) er = EMULATE_PRIV_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) case EXCCODE_ADES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) kvm_debug("%s: address error ST @ %#lx\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) cause &= ~0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) er = EMULATE_PRIV_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) case EXCCODE_ADEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) kvm_debug("%s: address error LD @ %#lx\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) badvaddr);
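/*
 * Likewise, load faults on the commpage are reported to the
 * guest as TLB load misses.
 */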
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) cause &= ~0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) er = EMULATE_PRIV_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) default:
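/* Anything else taken from guest user mode is a privilege violation */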
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) er = EMULATE_PRIV_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) if (er == EMULATE_PRIV_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) kvm_mips_emulate_exc(cause, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) * A User Address (UA) fault can happen if:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) * (1) the TLB entry is not present/valid in both the Guest and shadow host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) * TLBs; in this case we pass the fault on to the guest kernel and let it handle it, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) * (2) the TLB entry is present in the Guest TLB but not in the shadow host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) * TLB; in this case we inject the entry from the Guest TLB into the shadow host TLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) u32 *opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) bool write_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) enum emulation_result er = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) unsigned long va = vcpu->arch.host_cp0_badvaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) vcpu->arch.host_cp0_badvaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) * KVM would not have got the exception if this entry was valid in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) * shadow host TLB. Check the Guest TLB; if the entry is not there,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) * send the guest an exception. The guest exception handler should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) * then inject an entry into the guest TLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) */
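/* Probe the Guest TLB with the faulting VPN2 and the current guest ASID */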
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) index = kvm_mips_guest_tlb_lookup(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) (va & VPN2_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) KVM_ENTRYHI_ASID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) if (index < 0) {
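/* Not in the Guest TLB either: deliver a TLB refill exception to the guest */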
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) if (exccode == EXCCODE_TLBL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) } else if (exccode == EXCCODE_TLBS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) kvm_err("%s: invalid exc code: %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) exccode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) * Check if the entry is valid; if not, set up a TLB invalid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) * exception for the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) if (!TLB_IS_VALID(*tlb, va)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) if (exccode == EXCCODE_TLBL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) er = kvm_mips_emulate_tlbinv_ld(cause, opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) } else if (exccode == EXCCODE_TLBS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) er = kvm_mips_emulate_tlbinv_st(cause, opc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) kvm_err("%s: invalid exc code: %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) exccode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) * OK, we have a Guest TLB entry; now inject it into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) * shadow host TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) write_fault)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) __func__, va, index, vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) read_c0_entryhi());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) er = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) return er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) }
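
/*
 * For reference, a minimal sketch of how an exit handler might dispatch to
 * kvm_mips_handle_tlbmiss(). This is illustrative only -- the real callers
 * live in the trap-and-emulate exit handling code, and the RESUME_* policy
 * shown here is an assumption, not a quote of that code:
 *
 *	enum emulation_result er;
 *	int ret;
 *
 *	er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, write_fault);
 *	if (er == EMULATE_DONE)
 *		ret = RESUME_GUEST;
 *	else
 *		ret = RESUME_HOST;
 */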