/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/asm-compat.h>

#if defined(CONFIG_PPC_BOOK3S_64)
#ifdef PPC64_ELF_ABI_v2
#define FUNC(name)		name
#else
#define FUNC(name)		GLUE(.,name)
#endif
#define GET_SHADOW_VCPU(reg)	addi	reg, r13, PACA_SVCPU

#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name)		name
#define GET_SHADOW_VCPU(reg)	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(r2)

#endif /* CONFIG_PPC_BOOK3S_64 */
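
/*
 * Note: on 64-bit Book3S the shadow vcpu is embedded in the PACA, so
 * GET_SHADOW_VCPU only computes its address from r13 (the PACA pointer).
 * On 32-bit, r2 holds 'current' and the shadow vcpu is reached through a
 * pointer in the thread struct, hence the load instead of an addi.  The
 * FUNC() wrapper exists because the ELFv1 ABI calls functions through
 * dot-prefixed text symbols, while ELFv2 uses the plain symbol name.
 */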

#define VCPU_LOAD_NVGPRS(vcpu) \
	PPC_LL	r14, VCPU_GPR(R14)(vcpu); \
	PPC_LL	r15, VCPU_GPR(R15)(vcpu); \
	PPC_LL	r16, VCPU_GPR(R16)(vcpu); \
	PPC_LL	r17, VCPU_GPR(R17)(vcpu); \
	PPC_LL	r18, VCPU_GPR(R18)(vcpu); \
	PPC_LL	r19, VCPU_GPR(R19)(vcpu); \
	PPC_LL	r20, VCPU_GPR(R20)(vcpu); \
	PPC_LL	r21, VCPU_GPR(R21)(vcpu); \
	PPC_LL	r22, VCPU_GPR(R22)(vcpu); \
	PPC_LL	r23, VCPU_GPR(R23)(vcpu); \
	PPC_LL	r24, VCPU_GPR(R24)(vcpu); \
	PPC_LL	r25, VCPU_GPR(R25)(vcpu); \
	PPC_LL	r26, VCPU_GPR(R26)(vcpu); \
	PPC_LL	r27, VCPU_GPR(R27)(vcpu); \
	PPC_LL	r28, VCPU_GPR(R28)(vcpu); \
	PPC_LL	r29, VCPU_GPR(R29)(vcpu); \
	PPC_LL	r30, VCPU_GPR(R30)(vcpu); \
	PPC_LL	r31, VCPU_GPR(R31)(vcpu)

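/*
 * Roughly, in C terms (an illustrative sketch only, field names
 * approximate):
 *
 *	for (i = 14; i <= 31; i++)
 *		regs[i] = vcpu->arch.regs.gpr[i];
 *
 * Only the non-volatile GPRs are loaded here; the volatile registers are
 * transferred through the shadow vcpu by kvmppc_copy_to_svcpu() below.
 */
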
/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)    *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)

kvm_start_entry:
	/* Write correct stack frame */
	mflr	r0
	PPC_STL	r0, PPC_LR_STKOFF(r1)

	/* Save host state to the stack */
	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
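	/*
	 * PPC_STLU both allocates the switch frame and stores the old r1 as
	 * the back chain in one store-with-update, as the ELF ABI expects.
	 */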

	/* Save r3 (vcpu) */
	SAVE_GPR(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save CR */
	mfcr	r14
	stw	r14, _CCR(r1)

	/* Save LR */
	PPC_STL	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r3)

kvm_start_lightweight:
	/* Copy registers into shadow vcpu so we can access them in real mode */
	bl	FUNC(kvmppc_copy_to_svcpu)
	nop
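	/*
	 * The call follows the C ABI and may clobber volatile registers,
	 * r3 included, so reload the vcpu pointer from the stack frame.
	 */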
	REST_GPR(3, r1)

#ifdef CONFIG_PPC_BOOK3S_64
	/* Get the dcbz32 flag */
	PPC_LL	r0, VCPU_HFLAGS(r3)
	rldicl	r0, r0, 0, 63		/* r0 &= 1 */
	stb	r0, HSTATE_RESTORE_HID5(r13)
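
	/*
	 * The low hflags bit is the dcbz32 flag: a guest written for CPUs
	 * with 32-byte cache lines expects dcbz to clear 32 bytes, so on
	 * hosts with larger cache lines the real-mode entry code is told
	 * here whether it needs to adjust HID5 accordingly.
	 */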

	/* Load up guest SPRG3 value, since it's user readable */
	lbz	r4, VCPU_SHAREDBE(r3)
	cmpwi	r4, 0
	ld	r5, VCPU_SHARED(r3)
	beq	sprg3_little_endian
sprg3_big_endian:
#ifdef __BIG_ENDIAN__
	ld	r4, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r4, 0, r5
#endif
	b	after_sprg3_load
sprg3_little_endian:
#ifdef __LITTLE_ENDIAN__
	ld	r4, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r4, 0, r5
#endif

after_sprg3_load:
	mtspr	SPRN_SPRG3, r4
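
	/*
	 * The guest's shared page may be the opposite endianness from the
	 * host, so the SPRG3 value is either loaded natively (matching
	 * endianness) or byte-reversed with ldbrx before being written to
	 * the real SPRG3.
	 */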
#endif /* CONFIG_PPC_BOOK3S_64 */

	PPC_LL	r4, VCPU_SHADOW_MSR(r3)	/* get shadow_msr */

	/* Jump to segment patching handler and into our guest */
	bl	FUNC(kvmppc_entry_trampoline)
	nop

/*
 * This is the handler in module memory. It is reached via a jump from
 * the lowmem trampoline code, so it is effectively the guest exit code.
 */

/*
 * Register usage at this point:
 *
 * R1       = host R1
 * R2       = host R2
 * R12      = exit handler id
 * R13      = PACA
 * SVCPU.*  = guest *
 * MSR.EE   = 1
 *
 */

	PPC_LL	r3, GPR3(r1)		/* vcpu pointer */

	/*
	 * kvmppc_copy_from_svcpu can clobber volatile registers, so save
	 * the exit handler id to the vcpu and restore it from there later.
	 */
	stw	r12, VCPU_TRAP(r3)

	/* Transfer reg values from shadow vcpu back to vcpu struct */

	bl	FUNC(kvmppc_copy_from_svcpu)
	nop

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Reload kernel SPRG3 value.
	 * No need to save guest value as usermode can't modify SPRG3.
	 */
	ld	r3, PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE, r3
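	/*
	 * SPRG3 is readable from userspace and the VDSO relies on it, so
	 * the kernel's value must be back in place before returning to the
	 * host.
	 */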
#endif /* CONFIG_PPC_BOOK3S_64 */

	/* R7 = vcpu */
	PPC_LL	r7, GPR3(r1)

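	/*
	 * The C calling convention preserves r14-r31 across the copy call,
	 * so they still hold the guest's non-volatile register values here;
	 * write them back into the vcpu struct.
	 */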
	PPC_STL	r14, VCPU_GPR(R14)(r7)
	PPC_STL	r15, VCPU_GPR(R15)(r7)
	PPC_STL	r16, VCPU_GPR(R16)(r7)
	PPC_STL	r17, VCPU_GPR(R17)(r7)
	PPC_STL	r18, VCPU_GPR(R18)(r7)
	PPC_STL	r19, VCPU_GPR(R19)(r7)
	PPC_STL	r20, VCPU_GPR(R20)(r7)
	PPC_STL	r21, VCPU_GPR(R21)(r7)
	PPC_STL	r22, VCPU_GPR(R22)(r7)
	PPC_STL	r23, VCPU_GPR(R23)(r7)
	PPC_STL	r24, VCPU_GPR(R24)(r7)
	PPC_STL	r25, VCPU_GPR(R25)(r7)
	PPC_STL	r26, VCPU_GPR(R26)(r7)
	PPC_STL	r27, VCPU_GPR(R27)(r7)
	PPC_STL	r28, VCPU_GPR(R28)(r7)
	PPC_STL	r29, VCPU_GPR(R29)(r7)
	PPC_STL	r30, VCPU_GPR(R30)(r7)
	PPC_STL	r31, VCPU_GPR(R31)(r7)

	/* Pass the exit number as 2nd argument to kvmppc_handle_exit */
	lwz	r4, VCPU_TRAP(r7)

	/* Restore r3 (vcpu) */
	REST_GPR(3, r1)
	bl	FUNC(kvmppc_handle_exit_pr)

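	/*
	 * r3 now holds the handler's RESUME_* code: RESUME_GUEST re-enters
	 * the guest without reloading non-volatile GPRs, RESUME_GUEST_NV
	 * signals that the handler touched the vcpu's non-volatile state so
	 * it must be reloaded first, and anything else exits to the host.
	 */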
	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight

kvm_exit_loop:

	PPC_LL	r4, _LINK(r1)
	mtlr	r4

	lwz	r14, _CCR(r1)
	mtcr	r14

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	blr

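/*
 * Heavyweight re-entry: the exit handler may have changed the guest's
 * non-volatile GPRs in the vcpu struct, so reload them.  The lightweight
 * path below can skip the reload because r14-r31 still match the vcpu.
 */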
kvm_loop_heavyweight:

	PPC_LL	r4, _LINK(r1)
	PPC_STL	r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)

	/* Load vcpu */
	REST_GPR(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r3)

	/* Jump back to the lightweight guest entry point */
	b	kvm_start_lightweight

kvm_loop_lightweight:

	/* We'll need the vcpu pointer */
	REST_GPR(3, r1)

	/* Jump back to the lightweight guest entry point */
	b	kvm_start_lightweight