/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#if defined(CONFIG_PPC_BOOK3S_64)

#define GET_SHADOW_VCPU(reg)	\
	mr	reg, r13

#elif defined(CONFIG_PPC_BOOK3S_32)

#define GET_SHADOW_VCPU(reg)				\
	tophys(reg, r2);				\
	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg);	\
	tophys(reg, reg)

#endif
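
/*
 * Note: on Book3S_64 the shadow vcpu state lives in the PACA, which r13
 * already points at in real mode, so GET_SHADOW_VCPU is a plain move.
 * On Book3S_32 it hangs off current's thread_struct instead, so we
 * translate r2 (current) to a physical address and chase the
 * THREAD_KVM_SVCPU pointer.
 */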

/* Disable for nested KVM */
#define USE_QUICK_LAST_INST
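/*
 * With USE_QUICK_LAST_INST the exit path below fetches the offending
 * instruction itself through a short MSR_DR window; if that load faults,
 * KVM_GUEST_MODE_SKIP makes the fault handler step over it and we fall
 * back to KVM_INST_FETCH_FAILED.  When running nested under another
 * hypervisor this fast path is not safe, hence the note above.
 */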


/* Get helper functions for subarch specific functionality */

#if defined(CONFIG_PPC_BOOK3S_64)
#include "book3s_64_slb.S"
#elif defined(CONFIG_PPC_BOOK3S_32)
#include "book3s_32_sr.S"
#endif

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

	/* Required state:
	 *
	 * MSR = ~(IR|DR)
	 * R1 = host R1
	 * R2 = host R2
	 * R4 = guest shadow MSR
	 * R5 = normal host MSR
	 * R6 = current host MSR (EE, IR, DR off)
	 * LR = highmem guest exit code
	 * all other volatile GPRs = free
	 * SVCPU[CR] = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR] = guest LR
	 */

	/* r3 = shadow vcpu */
	GET_SHADOW_VCPU(r3)

	/* Save guest exit handler address and MSR */
	mflr	r0
	PPC_STL	r0, HSTATE_VMHANDLER(r3)
	PPC_STL	r5, HSTATE_HOST_MSR(r3)

	/* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
	PPC_STL	r1, HSTATE_HOST_R1(r3)
	PPC_STL	r2, HSTATE_HOST_R2(r3)

	/* Activate guest mode, so faults get handled by KVM */
	li	r11, KVM_GUEST_MODE_GUEST
	stb	r11, HSTATE_IN_GUEST(r3)

	/* Switch to guest segment. This is subarch specific. */
	LOAD_GUEST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Save host FSCR */
	mfspr	r8, SPRN_FSCR
	std	r8, HSTATE_HOST_FSCR(r13)
	/* Set FSCR during guest execution */
	ld	r9, SVCPU_SHADOW_FSCR(r13)
	mtspr	SPRN_FSCR, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If the CPU supports it, we should tell it to use a real
	 * 32 byte dcbz instead, because that's a lot faster.
	 */
	lbz	r0, HSTATE_RESTORE_HID5(r3)
	cmpwi	r0, 0
	beq	no_dcbz32_on

	mfspr	r0, SPRN_HID5
	ori	r0, r0, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr	SPRN_HID5, r0
no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

	/* Enter guest */

	PPC_LL	r8, SVCPU_CTR(r3)
	PPC_LL	r9, SVCPU_LR(r3)
	lwz	r10, SVCPU_CR(r3)
	PPC_LL	r11, SVCPU_XER(r3)

	mtctr	r8
	mtlr	r9
	mtcr	r10
	mtxer	r11

	/* Move SRR0 and SRR1 into the respective regs */
	PPC_LL	r9, SVCPU_PC(r3)
	/* First clear RI in our current MSR value */
	li	r0, MSR_RI
	andc	r6, r6, r0

	PPC_LL	r0, SVCPU_R0(r3)
	PPC_LL	r1, SVCPU_R1(r3)
	PPC_LL	r2, SVCPU_R2(r3)
	PPC_LL	r5, SVCPU_R5(r3)
	PPC_LL	r7, SVCPU_R7(r3)
	PPC_LL	r8, SVCPU_R8(r3)
	PPC_LL	r10, SVCPU_R10(r3)
	PPC_LL	r11, SVCPU_R11(r3)
	PPC_LL	r12, SVCPU_R12(r3)
	PPC_LL	r13, SVCPU_R13(r3)

	MTMSR_EERI(r6)
	mtsrr0	r9
	mtsrr1	r4

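	/* r4 (guest shadow MSR), r6 (current MSR) and r9 (guest PC) were
	 * still live for the mtsrr/MTMSR above, so their guest values are
	 * restored only now; r3, the svcpu base itself, comes last of all.
	 */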
	PPC_LL	r4, SVCPU_R4(r3)
	PPC_LL	r6, SVCPU_R6(r3)
	PPC_LL	r9, SVCPU_R9(r3)
	PPC_LL	r3, (SVCPU_R3)(r3)

	RFI_TO_GUEST
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_interrupt_pr
kvmppc_interrupt_pr:
	/* 64-bit entry. Register usage at this point:
	 *
	 * SPRG_SCRATCH0   = guest R13
	 * R12             = (guest CR << 32) | exit handler id
	 * R13             = PACA
	 * HSTATE.SCRATCH0 = guest R12
	 */
#ifdef CONFIG_PPC64
	/* Match 32-bit entry */
	rotldi	r12, r12, 32		  /* Flip R12 halves for stw */
	stw	r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
	srdi	r12, r12, 32		  /* shift trap into low half */
#endif

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0   = guest R13
	 * R12             = exit handler id
	 * R13             = shadow vcpu (32-bit) or PACA (64-bit)
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CR
	 */

	/* Save registers */

	PPC_STL	r0, SVCPU_R0(r13)
	PPC_STL	r1, SVCPU_R1(r13)
	PPC_STL	r2, SVCPU_R2(r13)
	PPC_STL	r3, SVCPU_R3(r13)
	PPC_STL	r4, SVCPU_R4(r13)
	PPC_STL	r5, SVCPU_R5(r13)
	PPC_STL	r6, SVCPU_R6(r13)
	PPC_STL	r7, SVCPU_R7(r13)
	PPC_STL	r8, SVCPU_R8(r13)
	PPC_STL	r9, SVCPU_R9(r13)
	PPC_STL	r10, SVCPU_R10(r13)
	PPC_STL	r11, SVCPU_R11(r13)

	/* Restore R1/R2 so we can handle faults */
	PPC_LL	r1, HSTATE_HOST_R1(r13)
	PPC_LL	r2, HSTATE_HOST_R2(r13)

	/* Save guest PC and MSR */
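	/* On HV-capable CPUs, traps with bit 0x2 set in the handler id
	 * were delivered through HSRR0/HSRR1 rather than SRR0/SRR1, so
	 * read those instead and mask the HV bit back out of r12
	 * (0x3ffd clears 0x2).  cr1 remembers whether this was an HV
	 * interrupt; the return path below checks it again.
	 */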
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	andi.	r0, r12, 0x2
	cmpwi	cr1, r0, 0
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	andi.	r12, r12, 0x3ffd
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	mfsrr0	r3
	mfsrr1	r4
2:
	PPC_STL	r3, SVCPU_PC(r13)
	PPC_STL	r4, SVCPU_SHADOW_SRR1(r13)

	/* Get scratch'ed off registers */
	GET_SCRATCH0(r9)
	PPC_LL	r8, HSTATE_SCRATCH0(r13)
	lwz	r7, HSTATE_SCRATCH1(r13)

	PPC_STL	r9, SVCPU_R13(r13)
	PPC_STL	r8, SVCPU_R12(r13)
	stw	r7, SVCPU_CR(r13)

	/* Save more register state */

	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8
	mflr	r9

	PPC_STL	r5, SVCPU_XER(r13)
	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
	stw	r7, SVCPU_FAULT_DSISR(r13)
	PPC_STL	r8, SVCPU_CTR(r13)
	PPC_STL	r9, SVCPU_LR(r13)
	/*
	 * To easily fetch the last instruction, the one we took the
	 * #vmexit at, we exploit the fact that the guest's virtual
	 * memory layout is still mapped here, so we can simply load
	 * from the guest's PC address.
	 */

	/* We only load the last instruction when it's safe */
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_SYSCALL
	beq	ld_last_prev_inst
	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
	beq-	ld_last_inst
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

	b	no_ld_last_inst

ld_last_prev_inst:
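	/* A syscall exit leaves the PC pointing after the sc, so step
	 * back one (4-byte) instruction to fetch the sc itself.
	 */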
	addi	r3, r3, -4

ld_last_inst:
	/* Save off the guest instruction we're at */

	/* In case lwz faults */
	li	r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r9, KVM_GUEST_MODE_SKIP
	stb	r9, HSTATE_IN_GUEST(r13)

	/* 1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR		/* Enable paging for data */
	mtmsr	r11
	sync
	/* 2) fetch the instruction */
	lwz	r0, 0(r3)
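	/* If the lwz faulted, the skip handler stepped over it and r0
	 * still holds KVM_INST_FETCH_FAILED from above.
	 */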
	/* 3) disable paging again */
	mtmsr	r9
	sync

#endif
	stw	r0, SVCPU_LAST_INST(r13)

no_ld_last_inst:

	/* Unset guest mode */
	li	r9, KVM_GUEST_MODE_NONE
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Switch back to host MMU */
	LOAD_HOST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64

	lbz	r5, HSTATE_RESTORE_HID5(r13)
	cmpwi	r5, 0
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5, SPRN_HID5
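	/* rldimi inserts the (zero) low bits of r4 over the dcbz32
	 * field, clearing the HID5_dcbz32 bit (0x80) set on entry.
	 */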
	rldimi	r5, r4, 6, 56
	mtspr	SPRN_HID5, r5

no_dcbz32_off:

BEGIN_FTR_SECTION
	/* Save guest FSCR on a FAC_UNAVAIL interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	bne+	no_fscr_save
	mfspr	r7, SPRN_FSCR
	std	r7, SVCPU_SHADOW_FSCR(r13)
no_fscr_save:
	/* Restore host FSCR */
	ld	r8, HSTATE_HOST_FSCR(r13)
	mtspr	SPRN_FSCR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Having set up SRR0/1 with the address where we want
	 * to continue with relocation on (potentially in module
	 * space), we either just go straight there with rfi[d],
	 * or we jump to an interrupt handler if there is an
	 * interrupt to be handled first.  In the latter case,
	 * the rfi[d] at the end of the interrupt handler will
	 * get us back to where we want to continue.
	 */

	/* Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R10      = raw exit handler id
	 * R12      = exit handler id
	 * R13      = shadow vcpu (32-bit) or PACA (64-bit)
	 * SVCPU.*  = guest *
	 *
	 */

	PPC_LL	r6, HSTATE_HOST_MSR(r13)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * We don't want to change MSR[TS] bits via rfi here.
	 * The actual TM handling logic runs later in the host,
	 * with DR/IR recovered, after HSTATE_VMHANDLER.  Since
	 * MSR_TM may be enabled in HOST_MSR, rfid would not
	 * suppress such a change and could raise an exception.
	 * Manually copy the TS bits here to prevent a TS state
	 * change.
	 */
	mfmsr	r7
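	/* Extract the two TS bits from the live MSR into the low bits
	 * of r7, then insert them at the same position in r6 so the
	 * return below keeps the current transaction state.
	 */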
	rldicl	r7, r7, 64 - MSR_TS_S_LG, 62
	rldimi	r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG
#endif
	PPC_LL	r8, HSTATE_VMHANDLER(r13)

#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
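	/* cr1 was set from "trap & 0x2" above: if this was not an HV
	 * interrupt, the plain SRR0/1 setup below is all we need.
	 */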
	beq	cr1, 1f
	mtspr	SPRN_HSRR1, r6
	mtspr	SPRN_HSRR0, r8
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	/* Restore host msr -> SRR1 */
	mtsrr1	r6
	/* Load highmem handler address */
	mtsrr0	r8

	/* RFI into the highmem handler, or jump to interrupt handler */
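	/* beqa branches to an absolute address, i.e. straight into the
	 * host's exception vector for the matching interrupt (0x500 for
	 * external, etc.), as if it had arrived from kernel code.
	 */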
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beqa	BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
	beqa	BOOK3S_INTERRUPT_DECREMENTER
	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
	beqa	BOOK3S_INTERRUPT_PERFMON
	cmpwi	r12, BOOK3S_INTERRUPT_DOORBELL
	beqa	BOOK3S_INTERRUPT_DOORBELL

	RFI_TO_KERNEL
kvmppc_handler_trampoline_exit_end: