// SPDX-License-Identifier: GPL-2.0
#include <linux/stringify.h>

#include <asm/paravirt.h>
#include <asm/asm-offsets.h>

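/*
 * Helpers for looking up the native instruction templates below:
 * PSTART/PEND give the start and end of a template member inside one
 * of the patch_data_* structs, PATCH copies that template into the
 * instruction buffer, and PATCH_CASE generates the switch case that
 * maps a paravirt op to its template.
 */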
#define PSTART(d, m) \
	patch_data_##d.m

#define PEND(d, m) \
	(PSTART(d, m) + sizeof(patch_data_##d.m))

#define PATCH(d, m, insn_buff, len) \
	paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))

#define PATCH_CASE(ops, m, data, insn_buff, len) \
	case PARAVIRT_PATCH(ops.m): \
		return PATCH(data, ops##_##m, insn_buff, len)

#ifdef CONFIG_PARAVIRT_XXL
struct patch_xxl {
	const unsigned char	irq_irq_disable[1];
	const unsigned char	irq_irq_enable[1];
	const unsigned char	irq_save_fl[2];
	const unsigned char	mmu_read_cr2[3];
	const unsigned char	mmu_read_cr3[3];
	const unsigned char	mmu_write_cr3[3];
	const unsigned char	irq_restore_fl[2];
	const unsigned char	cpu_wbinvd[2];
	const unsigned char	cpu_usergs_sysret64[6];
	const unsigned char	mov64[3];
};

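/*
 * Raw x86 opcode bytes for the native implementation of each XXL
 * paravirt op; the comment on each entry gives the corresponding
 * assembly.
 */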
static const struct patch_xxl patch_data_xxl = {
	.irq_irq_disable	= { 0xfa },		// cli
	.irq_irq_enable		= { 0xfb },		// sti
	.irq_save_fl		= { 0x9c, 0x58 },	// pushf; pop %[re]ax
	.mmu_read_cr2		= { 0x0f, 0x20, 0xd0 },	// mov %cr2, %[re]ax
	.mmu_read_cr3		= { 0x0f, 0x20, 0xd8 },	// mov %cr3, %[re]ax
	.mmu_write_cr3		= { 0x0f, 0x22, 0xdf },	// mov %rdi, %cr3
	.irq_restore_fl		= { 0x57, 0x9d },	// push %rdi; popfq
	.cpu_wbinvd		= { 0x0f, 0x09 },	// wbinvd
	.cpu_usergs_sysret64	= { 0x0f, 0x01, 0xf8,
				    0x48, 0x0f, 0x07 },	// swapgs; sysretq
	.mov64			= { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
};

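/*
 * Patch an op whose native implementation is the 64-bit identity
 * function: the first argument is returned unchanged in %rax.
 */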
unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
{
	return PATCH(xxl, mov64, insn_buff, len);
}
#endif /* CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
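/*
 * Native templates for the spinlock ops: a queued spinlock is unlocked
 * by zeroing its lock byte, and on bare metal a vCPU is never
 * preempted, so vcpu_is_preempted() can simply return false.
 */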
struct patch_lock {
	unsigned char queued_spin_unlock[3];
	unsigned char vcpu_is_preempted[2];
};

static const struct patch_lock patch_data_lock = {
	.vcpu_is_preempted	= { 0x31, 0xc0 },	// xor %eax, %eax

# ifdef CONFIG_X86_64
	.queued_spin_unlock	= { 0xc6, 0x07, 0x00 },	// movb $0, (%rdi)
# else
	.queued_spin_unlock	= { 0xc6, 0x00, 0x00 },	// movb $0, (%eax)
# endif
};
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

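/*
 * Try to replace the call to the pv_ops member identified by @type
 * with its native instruction template; anything without a template
 * here falls back to paravirt_patch_default().
 */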
unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
			  unsigned int len)
{
	switch (type) {

#ifdef CONFIG_PARAVIRT_XXL
	PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);

	PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
	PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);

	PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
#endif

#ifdef CONFIG_PARAVIRT_SPINLOCKS
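	/*
	 * The lock ops are only patched inline while they still point
	 * at the native implementations, which the pv_is_native_*()
	 * helpers check for.
	 */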
	case PARAVIRT_PATCH(lock.queued_spin_unlock):
		if (pv_is_native_spin_unlock())
			return PATCH(lock, queued_spin_unlock, insn_buff, len);
		break;

	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted())
			return PATCH(lock, vcpu_is_preempted, insn_buff, len);
		break;
#endif
	default:
		break;
	}

	return paravirt_patch_default(type, insn_buff, addr, len);
}