^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright 2010-2011 Freescale Semiconductor, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Alexander Graf <agraf@suse.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/kvm_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/kmemleak.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/kvm_para.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm/reg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <asm/sections.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <asm/disassemble.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <asm/ppc-opcode.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <asm/epapr_hcalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define KVM_MAGIC_PAGE (-4096L)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define KVM_INST_LWZ 0x80000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define KVM_INST_STW 0x90000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define KVM_INST_LD 0xe8000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define KVM_INST_STD 0xf8000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define KVM_INST_NOP 0x60000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define KVM_INST_B 0x48000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define KVM_INST_B_MASK 0x03ffffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define KVM_INST_B_MAX 0x01ffffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define KVM_INST_LI 0x38000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define KVM_MASK_RT 0x03e00000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define KVM_RT_30 0x03c00000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define KVM_MASK_RB 0x0000f800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define KVM_INST_MFMSR 0x7c0000a6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define SPR_FROM 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define SPR_TO 0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) (((sprn) & 0x1f) << 16) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) (((sprn) & 0x3e0) << 6) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) (moveto))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define KVM_INST_MFSPR(sprn) KVM_INST_SPR(sprn, SPR_FROM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define KVM_INST_MTSPR(sprn) KVM_INST_SPR(sprn, SPR_TO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define KVM_INST_TLBSYNC 0x7c00046c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define KVM_INST_MTMSRD_L0 0x7c000164
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define KVM_INST_MTMSRD_L1 0x7c010164
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define KVM_INST_MTMSR 0x7c000124
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define KVM_INST_WRTEE 0x7c000106
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define KVM_INST_WRTEEI_0 0x7c000146
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define KVM_INST_WRTEEI_1 0x7c008146
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define KVM_INST_MTSRIN 0x7c0001e4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) static bool kvm_patching_worked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) extern char kvm_tmp[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) extern char kvm_tmp_end[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) static int kvm_tmp_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) static void __init kvm_patch_ins(u32 *inst, u32 new_inst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) *inst = new_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) flush_icache_range((ulong)inst, (ulong)inst + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
/* Replace the instruction at @inst with a no-op. */
static void __init kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
/*
 * Replace the instruction at @inst with an unconditional relative branch
 * of displacement @addr.  The caller must have verified @addr fits the
 * 26-bit branch field (KVM_INST_B_MAX).
 */
static void __init kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupts handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) static u32 * __init kvm_alloc(int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) if ((kvm_tmp_index + len) > (kvm_tmp_end - kvm_tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) printk(KERN_ERR "KVM: No more space (%d + %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) kvm_tmp_index, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) kvm_patching_worked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) p = (void*)&kvm_tmp[kvm_tmp_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) kvm_tmp_index += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) extern u32 kvm_emulate_mtmsrd_branch_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) extern u32 kvm_emulate_mtmsrd_reg_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) extern u32 kvm_emulate_mtmsrd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) extern u32 kvm_emulate_mtmsrd[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
/*
 * Replace an "mtmsrd" at @inst (raw instruction word in @rt, only the RT
 * field is consumed) with a branch into a private copy of the
 * kvm_emulate_mtmsrd template: copy the template into kvm_tmp, fix up its
 * branch-back offset and source register, then patch the call site.
 */
static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;	/* call site -> template (forward b) */
	int distance_end;	/* template -> instruction after call site */
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		/* r30/r31 get their live value reloaded from the magic
		   page scratch slots instead of being read directly —
		   presumably because the template clobbers them; confirm
		   against kvm_emulate.S. */
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	/* Preserve the original instruction inside the template. */
	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation — done last, after the template is ready. */
	kvm_patch_ins_b(inst, distance_start);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) extern u32 kvm_emulate_mtmsr_branch_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) extern u32 kvm_emulate_mtmsr_reg1_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) extern u32 kvm_emulate_mtmsr_reg2_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) extern u32 kvm_emulate_mtmsr_orig_ins_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) extern u32 kvm_emulate_mtmsr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) extern u32 kvm_emulate_mtmsr[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
/*
 * Replace an "mtmsr" at @inst with a branch into a private copy of the
 * kvm_emulate_mtmsr template.  @rt carries the raw instruction word; only
 * its RT field is used.  The template has two register fixup slots
 * (reg1/reg2) that must both name the source register.
 */
static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;	/* call site -> template (forward b) */
	int distance_end;	/* template -> instruction after call site */
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		/* r30/r31 are reloaded from the magic page scratch slots
		   rather than read directly. */
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	/* Preserve the original instruction inside the template. */
	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation — done last, after the template is ready. */
	kvm_patch_ins_b(inst, distance_start);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) extern u32 kvm_emulate_wrtee_branch_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) extern u32 kvm_emulate_wrtee_reg_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) extern u32 kvm_emulate_wrtee_orig_ins_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) extern u32 kvm_emulate_wrtee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) extern u32 kvm_emulate_wrtee[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
/*
 * Replace a "wrtee" (or "wrteei 1" when @imm_one is set) at @inst with a
 * branch into a private copy of the kvm_emulate_wrtee template.  @rt
 * carries the raw instruction word; only its RT field is used.
 */
static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;	/* call site -> template (forward b) */
	int distance_end;	/* template -> instruction after call site */
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		/* wrteei 1: materialize MSR_EE in r30 with "li r30,MSR_EE"
		   instead of reading a source register. */
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			/* r30/r31 are reloaded from the magic page scratch
			   slots rather than read directly. */
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	/* Preserve the original instruction inside the template. */
	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation — done last, after the template is ready. */
	kvm_patch_ins_b(inst, distance_start);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) extern u32 kvm_emulate_wrteei_0_branch_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) extern u32 kvm_emulate_wrteei_0_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) extern u32 kvm_emulate_wrteei_0[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
/*
 * Replace a "wrteei 0" at @inst with a branch into a private copy of the
 * kvm_emulate_wrteei_0 template.  No register fixups are needed — the
 * only per-site fixup is the branch back to the next instruction.
 */
static void __init kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;	/* call site -> template (forward b) */
	int distance_end;	/* template -> instruction after call site */
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation — done last, after the template is ready. */
	kvm_patch_ins_b(inst, distance_start);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) #ifdef CONFIG_PPC_BOOK3S_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) extern u32 kvm_emulate_mtsrin_branch_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) extern u32 kvm_emulate_mtsrin_reg1_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) extern u32 kvm_emulate_mtsrin_reg2_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) extern u32 kvm_emulate_mtsrin_orig_ins_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) extern u32 kvm_emulate_mtsrin_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) extern u32 kvm_emulate_mtsrin[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
/*
 * Replace an "mtsrin rt,rb" at @inst with a branch into a private copy of
 * the kvm_emulate_mtsrin template.  @rt is the pre-shifted RT field; @rb
 * is shifted into place here for the template's first fixup slot.
 */
static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;	/* call site -> template (forward b) */
	int distance_end;	/* template -> instruction after call site */
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation — done last, after the template is ready. */
	kvm_patch_ins_b(inst, distance_start);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) static void __init kvm_map_magic_page(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) u32 *features = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) ulong in[8] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) ulong out[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) in[0] = KVM_MAGIC_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) *features = out[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) static void __init kvm_check_ins(u32 *inst, u32 features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) u32 _inst = *inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) u32 inst_no_rt = _inst & ~KVM_MASK_RT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) u32 inst_rt = _inst & KVM_MASK_RT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) switch (inst_no_rt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) /* Loads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) case KVM_INST_MFMSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) case KVM_INST_MFSPR(SPRN_SPRG0):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) case KVM_INST_MFSPR(SPRN_SPRG1):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) case KVM_INST_MFSPR(SPRN_SPRG2):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) case KVM_INST_MFSPR(SPRN_SPRG3):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) case KVM_INST_MFSPR(SPRN_SRR0):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) case KVM_INST_MFSPR(SPRN_SRR1):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) case KVM_INST_MFSPR(SPRN_DEAR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) case KVM_INST_MFSPR(SPRN_DAR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) case KVM_INST_MFSPR(SPRN_DSISR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) #ifdef CONFIG_PPC_BOOK3E_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) case KVM_INST_MFSPR(SPRN_MAS0):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) case KVM_INST_MFSPR(SPRN_MAS1):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) case KVM_INST_MFSPR(SPRN_MAS2):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) case KVM_INST_MFSPR(SPRN_MAS3):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) case KVM_INST_MFSPR(SPRN_MAS4):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) case KVM_INST_MFSPR(SPRN_MAS6):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) case KVM_INST_MFSPR(SPRN_MAS7):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) #endif /* CONFIG_PPC_BOOK3E_MMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) case KVM_INST_MFSPR(SPRN_SPRG4):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) case KVM_INST_MFSPR(SPRN_SPRG4R):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) case KVM_INST_MFSPR(SPRN_SPRG5):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) case KVM_INST_MFSPR(SPRN_SPRG5R):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) case KVM_INST_MFSPR(SPRN_SPRG6):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) case KVM_INST_MFSPR(SPRN_SPRG6R):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) case KVM_INST_MFSPR(SPRN_SPRG7):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) case KVM_INST_MFSPR(SPRN_SPRG7R):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) case KVM_INST_MFSPR(SPRN_ESR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) case KVM_INST_MFSPR(SPRN_PIR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /* Stores */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) case KVM_INST_MTSPR(SPRN_SPRG0):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) case KVM_INST_MTSPR(SPRN_SPRG1):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) case KVM_INST_MTSPR(SPRN_SPRG2):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) case KVM_INST_MTSPR(SPRN_SPRG3):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) case KVM_INST_MTSPR(SPRN_SRR0):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) case KVM_INST_MTSPR(SPRN_SRR1):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) case KVM_INST_MTSPR(SPRN_DEAR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) case KVM_INST_MTSPR(SPRN_DAR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) case KVM_INST_MTSPR(SPRN_DSISR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) #ifdef CONFIG_PPC_BOOK3E_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) case KVM_INST_MTSPR(SPRN_MAS0):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) case KVM_INST_MTSPR(SPRN_MAS1):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) case KVM_INST_MTSPR(SPRN_MAS2):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) case KVM_INST_MTSPR(SPRN_MAS3):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) case KVM_INST_MTSPR(SPRN_MAS4):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) case KVM_INST_MTSPR(SPRN_MAS6):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) case KVM_INST_MTSPR(SPRN_MAS7):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) #endif /* CONFIG_PPC_BOOK3E_MMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) case KVM_INST_MTSPR(SPRN_SPRG4):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) case KVM_INST_MTSPR(SPRN_SPRG5):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) case KVM_INST_MTSPR(SPRN_SPRG6):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) case KVM_INST_MTSPR(SPRN_SPRG7):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) case KVM_INST_MTSPR(SPRN_ESR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /* Nops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) case KVM_INST_TLBSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) kvm_patch_ins_nop(inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) /* Rewrites */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) case KVM_INST_MTMSRD_L1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) kvm_patch_ins_mtmsrd(inst, inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) case KVM_INST_MTMSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) case KVM_INST_MTMSRD_L0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) kvm_patch_ins_mtmsr(inst, inst_rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) case KVM_INST_WRTEE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) kvm_patch_ins_wrtee(inst, inst_rt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) switch (inst_no_rt & ~KVM_MASK_RB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) #ifdef CONFIG_PPC_BOOK3S_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) case KVM_INST_MTSRIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) if (features & KVM_MAGIC_FEAT_SR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) u32 inst_rb = _inst & KVM_MASK_RB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) switch (_inst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) case KVM_INST_WRTEEI_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) kvm_patch_ins_wrteei_0(inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) case KVM_INST_WRTEEI_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) kvm_patch_ins_wrtee(inst, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) extern u32 kvm_template_start[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) extern u32 kvm_template_end[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) static void __init kvm_use_magic_page(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) u32 *start, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) u32 features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) /* Tell the host to map the magic page to -4096 on all CPUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) on_each_cpu(kvm_map_magic_page, &features, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) /* Quick self-test to see if the mapping works */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) kvm_patching_worked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) /* Now loop through all code and find instructions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) start = (void*)_stext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) end = (void*)_etext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * Being interrupted in the middle of patching would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * be bad for SPRG4-7, which KVM can't keep in sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * with emulated accesses because reads don't trap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) for (p = start; p < end; p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) /* Avoid patching the template code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (p >= kvm_template_start && p < kvm_template_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) p = kvm_template_end - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) kvm_check_ins(p, features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) kvm_patching_worked ? "worked" : "failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) static int __init kvm_guest_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) if (!kvm_para_available())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (!epapr_paravirt_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) kvm_use_magic_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) /* Enable napping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) powersave_nap = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) postcore_initcall(kvm_guest_init);