// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

/*
 * The LSB of the HYP VA tag
 */
static u8 tag_lsb;
/*
 * The HYP VA tag value with the region bit
 */
static u64 tag_val;
static u64 va_mask;

/*
 * Compute HYP VA by using the same computation as kern_hyp_va().
 */
static u64 __early_kern_hyp_va(u64 addr)
{
	addr &= va_mask;
	addr |= tag_val << tag_lsb;
	return addr;
}
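
/*
 * Worked sketch of the transformation above (numbers assumed, not from
 * any real platform): with tag_lsb == 38, va_mask == GENMASK_ULL(37, 0)
 * and tag_val == 0x2a2, the kernel linear VA 0xffff800010000000 becomes:
 *
 *	addr &= va_mask;		// 0x0000000010000000, bits 37:0 kept
 *	addr |= 0x2a2ULL << 38;		// 0x0000a88010000000, tag inserted
 */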

/*
 * Store a hyp VA <-> PA offset into an EL2-owned variable.
 */
static void init_hyp_physvirt_offset(void)
{
	u64 kern_va, hyp_va;

	/* Compute the offset from the hyp VA and PA of a random symbol. */
	kern_va = (u64)lm_alias(__hyp_text_start);
	hyp_va = __early_kern_hyp_va(kern_va);
	hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
}
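
/*
 * The stored offset satisfies PA == hyp VA + hyp_physvirt_offset.
 * Continuing the assumed numbers above: a hyp VA of 0x0000a88010000000
 * backed by PA 0x40000000 gives a (negative) hyp_physvirt_offset of
 * 0x40000000 - 0xa88010000000, which EL2 code adds to a hyp VA to
 * recover the PA.
 */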

/*
 * We want to generate a hyp VA with the following format (with V ==
 * vabits_actual):
 *
 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 *  ---------------------------------------------------------
 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
 *           |---------- tag_val ----------|---- va_mask ----|
 *
 * which does not conflict with the idmap regions.
 */
__init void kvm_compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;

	/* Where is my RAM region? */
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);

	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));

	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = hyp_va_msb;

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
		/* We have some free bits to insert a random tag. */
		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
	}
	tag_val >>= tag_lsb;

	init_hyp_physvirt_offset();
}
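
/*
 * Worked sketch (values assumed): with vabits_actual == 48 and a
 * linear map whose start and end first differ at bit 37, fls64()
 * yields tag_lsb == 38 and va_mask == GENMASK_ULL(37, 0). An idmap
 * sitting in the lower half of the PA space gives hyp_va_msb ==
 * BIT(47), pushing hyp VAs into the opposite half from the idmap.
 * With CONFIG_RANDOMIZE_BASE, bits 46:38 of the tag are then
 * randomized before tag_val is shifted down by tag_lsb.
 */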

/*
 * The .hyp.reloc ELF section contains a list of kimg positions that
 * contain kimg VAs but will be accessed only in hyp execution context.
 * Convert them to hyp VAs. See gen-hyprel.c for more details.
 */
__init void kvm_apply_hyp_relocations(void)
{
	int32_t *rel;
	int32_t *begin = (int32_t *)__hyp_reloc_begin;
	int32_t *end = (int32_t *)__hyp_reloc_end;

	for (rel = begin; rel < end; ++rel) {
		uintptr_t *ptr, kimg_va;

		/*
		 * Each entry contains a 32-bit relative offset from itself
		 * to a kimg VA position.
		 */
		ptr = (uintptr_t *)lm_alias((char *)rel + *rel);

		/* Read the kimg VA value at the relocation address. */
		kimg_va = *ptr;

		/* Convert to hyp VA and store back to the relocation address. */
		*ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va));
	}
}
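
/*
 * Illustrative walk-through, with assumed addresses: an entry at
 * rel == 0xffff800011d00000 holding the value 0x100 designates the
 * kimg position 0xffff800011d00100. That position is patched through
 * its linear-map alias: the kimg VA stored there is first converted
 * to its linear alias, then run through __early_kern_hyp_va() and
 * written back, so hyp code reading the same location at EL2 sees a
 * VA it can actually dereference.
 */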

static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd,
					     tag_lsb);
		break;

	case 2:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}
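
/*
 * Taken together, the five instructions generated above implement the
 * kern_hyp_va() transformation as (sketch; the placeholder sequence
 * uses the same register for rd and rn, shown here as x0):
 *
 *	and	x0, x0, #va_mask		// keep the linear-map bits
 *	ror	x0, x0, #tag_lsb		// rotate the tag position down to bit 0
 *	add	x0, x0, #(tag_val & 0xfff)	// insert the low 12 tag bits
 *	add	x0, x0, #(tag_val >> 12), lsl #12 // insert the remaining tag bits
 *	ror	x0, x0, #(64 - tag_lsb)		// rotate the tag back into place
 *
 * Inserting the tag at bit 0 is why kvm_compute_layout() shifts
 * tag_val right by tag_lsb: the two add instructions can then encode
 * the tag as a pair of 12-bit immediates.
 */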

void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);

	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE doesn't need any address translation, let's NOP
		 * everything.
		 *
		 * Alternatively, if the tag is zero (because the layout
		 * dictates it and we don't have any spare bits in the
		 * address), NOP everything after masking the kernel VA.
		 */
		if (has_vhe() || (!tag_val && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}

void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 4);

	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
		return;

	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
	addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));

	/* Use PC[10:7] to branch to the same vector in KVM */
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));

	/*
	 * Branch over the preamble in order to avoid the initial store on
	 * the stack (which we already perform in the hardening vectors).
	 */
	addr += KVM_VECTOR_PREAMBLE;

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}
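
/*
 * The four patched instructions thus amount to:
 *
 *	movz	x0, #(addr & 0xffff)
 *	movk	x0, #((addr >> 16) & 0xffff), lsl #16
 *	movk	x0, #((addr >> 32) & 0xffff), lsl #32
 *	br	x0
 *
 * Only bits 47:0 of the target are materialized; no fourth movewide
 * instruction is emitted for bits 63:48 of the vector address.
 */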

static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u32 insn, oinsn, rd;

	BUG_ON(nr_inst != 4);

	/* Compute target register */
	oinsn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);

	/* movz rd, #(val & 0xffff) */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)val,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 48) & 0xffff), lsl #48 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 48),
					 48,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);
}
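
/*
 * Unlike the 48-bit branch target above, generate_mov_q() materializes
 * a full 64-bit constant: movz sets bits 15:0 and zeroes the rest, and
 * the three movk instructions fill in bits 31:16, 47:32 and 63:48.
 */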

void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst)
{
	generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
}

void kvm_compute_final_ctr_el0(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0),
		       origptr, updptr, nr_inst);
}