/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31
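
/*
 * The values above are raw GPR numbers as encoded in instructions. T0-T3
 * differ by ABI: o32 places the t0-t3 temporaries at $8-$11, whereas n32/n64
 * use $8-$11 as extra argument registers and put t0-t3 at $12-$15.
 */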

/* Some CP0 registers */
#define C0_PWBASE	5, 5
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_BADINSTR	8, 1
#define C0_BADINSTRP	8, 2
#define C0_PGD		9, 7
#define C0_ENTRYHI	10, 0
#define C0_GUESTCTL1	10, 4
#define C0_STATUS	12, 0
#define C0_GUESTCTL0	12, 6
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0
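
/*
 * Each of the definitions above expands to a (register, select) pair for the
 * uasm CP0 accessors, e.g. UASM_i_MFC0(&p, T0, C0_ENTRYHI) reads CP0
 * register 10, select 0 into t0.
 */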

#define CALLFRAME_SIZ	32

#ifdef CONFIG_64BIT
#define ST0_KX_IF_64	ST0_KX
#else
#define ST0_KX_IF_64	0
#endif

static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/*
 * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
 * we assume symmetry.
 */
static int c0_kscratch(void)
{
	switch (boot_cpu_type()) {
	case CPU_XLP:
	case CPU_XLR:
		return 22;
	default:
		return 31;
	}
}
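
/*
 * CP0 register 31 is the architecturally defined KScratch register; Netlogic
 * XLP/XLR cores instead provide their scratch registers in CP0 register 22,
 * hence the special case above.
 */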

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;

	if (pgd_reg != -1)
		kscratch_mask &= ~BIT(pgd_reg);

	/* Pick a scratch register for storing VCPU */
	if (kscratch_mask) {
		scratch_vcpu[0] = c0_kscratch();
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = c0_kscratch();
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}
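
/*
 * Worked example with illustrative values: if kscratch_mask == 0x3c (i.e.
 * KScratch2-KScratch5 usable), ffs() selects bit 2 first, making scratch_vcpu
 * (31, 2); after that bit is cleared, scratch_tmp becomes (31, 3). If no
 * KScratch registers are available, the DDataLo/ErrorEPC defaults are kept.
 */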

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of the stack frame */
	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (cpu_has_ebase_wg) {
		/* Set WG so that all the bits get written */
		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
		UASM_i_MTC0(p, reg, C0_EBASE);
	} else {
		uasm_i_mtc0(p, reg, C0_EBASE);
	}
}
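
/*
 * For reference, with EBase.WG supported on a 64-bit kernel the sequence
 * emitted above is roughly:
 *
 *	ori	reg, reg, MIPS_EBASE_WG
 *	dmtc0	reg, $15, 1		# UASM_i_MTC0 widens to dmtc0
 *
 * Without WG support only a 32-bit "mtc0 reg, $15, 1" is written.
 */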

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: vcpu
	 */

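	/*
	 * The loop below saves the callee-saved registers s0-s7 ($16-$23) and
	 * gp/sp/fp/ra ($28-$31) into the frame; t8/t9/k0/k1 ($24-$27) are
	 * skipped since the caller does not expect them to be preserved.
	 */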
	/* k0/k1 not being used in host kernel context */
	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save host scratch registers; they will hold the vcpu pointer etc. */
	kvm_mips_build_save_scratch(&p, V1, K1);

	/* VCPU scratch register has pointer to vcpu */
	UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	build_set_exc_base(&p, K0);

	/*
	 * Now that the new EBASE has been loaded, unset BEV and restore the
	 * host's interrupt mask, making sure that timer interrupts remain
	 * enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}
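
/*
 * A minimal sketch of how the generated function might be installed and
 * called ("gebase", the cast and the icache flush are assumptions for
 * illustration, not the exact call site):
 *
 *	u32 *p = gebase;
 *	int (*vcpu_run)(struct kvm_vcpu *) = (void *)p;
 *	p = kvm_mips_build_vcpu_run(p);
 *	flush_icache_range((unsigned long)gebase, (unsigned long)p);
 *	ret = vcpu_run(vcpu);
 */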

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label __maybe_unused *l = labels;
	struct uasm_reloc __maybe_unused *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

#ifdef CONFIG_KVM_MIPS_VZ
	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
	if (cpu_has_ldpte)
		UASM_i_MFC0(&p, K0, C0_PWBASE);
	else
		UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);

	/*
	 * Set up KVM GPA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 *
	 * We keep S0 pointing at struct kvm so we can load the ASID below.
	 */
	UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
			  (int)offsetof(struct kvm_vcpu, arch), K1);
	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);
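	/*
	 * Note the MIPS idiom above: the instruction emitted after the jalr
	 * occupies its branch delay slot, so the PWBase update (or nop)
	 * executes on the way into tlbmiss_handler_setup_pgd(), which takes
	 * mm->pgd in a0 per the calling convention.
	 */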

	/* Set GM bit to setup eret to VZ guest context */
	uasm_i_addiu(&p, V1, ZERO, 1);
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	if (cpu_has_guestid) {
		/*
		 * Set root mode GuestID, so that root TLB refill handler can
		 * use the correct GuestID in the root TLB.
		 */

		/* Get current GuestID */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = GuestCtl1.ID */
		uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
			   MIPS_GCTL1_ID_WIDTH);
		uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);

		/* GuestID handles dealiasing so we don't need to touch ASID */
		goto skip_asid_restore;
	}

	/* Root ASID Dealias (RAD) */

	/* Save host ASID */
	UASM_i_MFC0(&p, K0, C0_ENTRYHI);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
		  K1);

	/* Set the root ASID for the Guest */
	UASM_i_ADDIU(&p, T1, S0,
		     offsetof(struct kvm, arch.gpa_mm.context.asid));
#else
	/* Set the ASID for the Guest Kernel or User */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
					  guest_kernel_mm.context.asid));
	/* else user */
	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
					  guest_user_mm.context.asid));
	uasm_l_kernel_asid(&l, p);
#endif

	/* t1 holds the base of the ASID array; we still need the cpu id */
	/* smp_processor_id() */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* index the ASID array: t2 = cpu * sizeof(long) */
	uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
	UASM_i_ADDU(&p, T3, T1, T2);
	UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/*
	 * reuse ASID array offset
	 * cpuinfo_mips is a multiple of sizeof(long)
	 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
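	/*
	 * In other words, t2 = cpu * sizeof(long) from above is rescaled to
	 * cpu * sizeof(struct cpuinfo_mips), i.e. the byte offset of
	 * cpu_data[cpu], so that this CPU's asid_mask can be loaded and
	 * applied to the ASID in k0.
	 */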
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif

#ifndef CONFIG_KVM_MIPS_VZ
	/*
	 * Set up KVM T&E GVA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - but skips write into CP0_PWBase for now
	 */
	UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
			  (int)offsetof(struct mm_struct, context.asid), T1);

	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
#else
	/* Set up KVM VZ root ASID (!guestid) */
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
skip_asid_restore:
#endif
	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);
#endif

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
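	/*
	 * k0/k1 must come last: every load above uses k1 as the base pointer
	 * into kvm_vcpu_arch, so once the guest's k1 value is loaded the
	 * host's pointer is gone.
	 */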

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble TLB refill exception fast path handler for guest execution.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
#ifndef CONFIG_CPU_LOONGSON64
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
#endif

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);

	/*
	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
	 * assume symmetry and just disable preemption to silence the warning.
	 */
	preempt_disable();

#ifdef CONFIG_CPU_LOONGSON64
	UASM_i_MFC0(&p, K1, C0_PGD);
	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
#endif
	uasm_i_ldpte(&p, K1, 0);      /* even */
	uasm_i_ldpte(&p, K1, 1);      /* odd */
	uasm_i_tlbwr(&p);
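	/*
	 * LDDIR/LDPTE are Loongson page table walker instructions: lddir
	 * fetches the directory entry for the faulting address at the given
	 * level, and the ldpte pair loads the even/odd PTEs into EntryLo0/1,
	 * so no software refill sequence is needed here.
	 */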
#else
	/*
	 * Now for the actual refill bit. A lot of this can be common with the
	 * Linux TLB refill handler, however we don't need to handle so many
	 * cases. We only need to handle user mode refills, and user mode runs
	 * with 32-bit addressing.
	 *
	 * Therefore the branch to label_vmalloc generated by build_get_pmde64()
	 * that isn't resolved should never actually get taken and is harmless
	 * to leave in place for now.
	 */

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	/* we don't support huge pages yet */

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
#endif

	preempt_enable();

	/* Get the VCPU pointer from the VCPU scratch register again */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Jump to guest */
	uasm_i_eret(&p);

	return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}
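
/*
 * With KScratch2/KScratch3 chosen at setup time (see the worked example
 * above), the vector emitted by this function reads roughly:
 *
 *	mtc0	k1, $31, 3	# stash guest k1 in scratch_tmp
 *	mfc0	k1, $31, 2	# k1 = vcpu (scratch_vcpu)
 *	addiu	k1, k1, offsetof(struct kvm_vcpu, arch)
 *	sw	k0, offsetof(struct kvm_vcpu_arch, gprs[26])(k1)
 *	b	<common exit handler>
 *	nop			# delay slot
 */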

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU; they are needed
	 * to process the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	if (cpu_has_badinstr) {
		uasm_i_mfc0(&p, K0, C0_BADINSTR);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstr), K1);
	}

	if (cpu_has_badinstrp) {
		uasm_i_mfc0(&p, K0, C0_BADINSTRP);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstrp), K1);
	}

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the host EBASE */
	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

#ifdef CONFIG_KVM_MIPS_VZ
	/* Restore host ASID */
	if (!cpu_has_guestid) {
		UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
			  K1);
		UASM_i_MTC0(&p, K0, C0_ENTRYHI);
	}

	/*
	 * Set up normal Linux process pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 */
	UASM_i_LW(&p, A0,
		  offsetof(struct kvm_vcpu_arch, host_pgd), K1);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Clear GM bit so we don't enter guest mode when EXL is cleared */
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	/* Save GuestCtl0 so we can access GExcCode after CPU migration */
	uasm_i_sw(&p, K0,
		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);

	if (cpu_has_guestid) {
		/*
		 * Clear root mode GuestID, so that root TLB operations use the
		 * root GuestID in the root TLB.
		 */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
		uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
	}
#endif

	/*
	 * Now that the new EBASE has been loaded, unset BEV and KSU_USER,
	 * clear EXL/IE, and set CU0 (plus SX/UX on 64-bit)
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) uasm_i_and(&p, V0, V0, AT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) uasm_i_lui(&p, AT, ST0_CU0 >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) uasm_i_or(&p, V0, V0, AT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) uasm_i_mtc0(&p, V0, C0_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Reserve space for the saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
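
	/*
	 * SP now points at the base of the pt_regs frame in which the
	 * host's registers were saved; C code called from here builds
	 * its frames below SP, so the saved state is preserved for
	 * kvm_mips_build_ret_to_host() to reload.
	 */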

	/*
	 * XXXKYMA: Do we need to load the host ASID? Probably not, since
	 * the kernel entries are marked GLOBAL; this needs verifying.
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);
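
	/*
	 * hwrena caches the host's C0_HWREna configuration; the guest
	 * may have run with different RDHWR enables, so the host value
	 * is put back before calling into C.
	 */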

	/*
	 * Jump to the exit handler.
	 *
	 * XXXKYMA: Not sure if this is safe; how large is the stack?
	 * Jump to kvm_mips_handle_exit() to see if we can deal with this
	 * in the kernel.
	 */
	uasm_i_move(&p, A0, S0);
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
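
	/*
	 * Illustrative sketch of the call sequence above:
	 *
	 *	move	a0, s0			# arg0 = vcpu pointer
	 *	la	t9, kvm_mips_handle_exit
	 *	jalr	ra, t9
	 *	 addiu	sp, sp, -CALLFRAME_SIZ	# delay slot
	 *
	 * S0 still holds the vcpu pointer saved on guest exit, and the
	 * delay slot carves out the ABI-required outgoing call frame.
	 */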

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr: Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler; make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been clobbered if we took an
	 * exception while handling the exception from the guest, so
	 * reload k1.
	 */

	uasm_i_move(&p, K1, S0);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
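
	/*
	 * S0 is callee-saved, so it still holds the vcpu pointer across
	 * the C call; K1 is recomputed as &vcpu->arch rather than
	 * trusting its previous value.
	 */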

	/*
	 * Check the return value; it tells us whether we are returning
	 * to the host (to handle I/O etc.) or resuming the guest.
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	uasm_i_nop(&p);
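
	/*
	 * Sketch of the test above, assuming the RESUME_* encoding
	 * where RESUME_HOST is a flag bit in the low bits of the
	 * return value:
	 *
	 *	if (v0 & RESUME_HOST)
	 *		goto return_to_host;
	 *	// else fall through and re-enter the guest
	 */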

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr: Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s0) back into the scratch register */
	UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
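
	/*
	 * scratch_vcpu[0]/[1] select the CP0 scratch register set up
	 * for this purpose; the exception entry code reads the vcpu
	 * pointer back out of it on the next guest exit.
	 */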

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	build_set_exc_base(&p, T0);
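
	/*
	 * Setting Status.BEV first parks exceptions on the bootstrap
	 * vectors, so there is no window in which a stale EBASE could
	 * be used while build_set_exc_base() rewrites C0_EBase to the
	 * guest exception base held in T0.
	 */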

	/* Set up the status register for running the guest in user mode */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);
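
	/*
	 * Sketch of the Status value that takes effect once EXL is
	 * cleared by the eventual eret in the enter-guest sequence:
	 *
	 *	status |= ST0_EXL | KSU_USER | ST0_IE;
	 *	status &= ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX);
	 *
	 * i.e. user mode with interrupts enabled, and no host-only
	 * CP0/MSA/64-bit privileges leaking into the guest.
	 */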

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr: Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
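
	/*
	 * K1 now points at the same pt_regs frame the entry code used
	 * to save the host's registers: host_stack holds the stack
	 * pointer from vcpu_run entry, and the frame sits immediately
	 * below it.
	 */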

	/*
	 * r2/v0 is the return code; shift it down by 2 (arithmetic)
	 * to recover the error code.
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);
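
	/*
	 * Sketch, assuming kvm_mips_handle_exit() packs its result as
	 * (err << 2) | RESUME_flags: the arithmetic shift recovers the
	 * signed error code (e.g. -EINTR) for the caller:
	 *
	 *	v0 = ((s32)v0) >> 2;
	 */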

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}
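
	/*
	 * The loop restores regs 16-23 (s0-s7) and 28-30 (gp, sp, fp),
	 * skipping 24-27 (t8, t9, k0, k1): only callee-saved state was
	 * stashed in the frame, and k1 is still in use here as the
	 * frame pointer. RA is reloaded last, below.
	 */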

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	uasm_i_nop(&p);

	return p;
}
