/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/*
 * Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here.
 */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, time.sched_clock);
}
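
/*
 * Illustrative sketch (not part of this header): a guest selects its
 * clock by overwriting the hook at boot, roughly the way the kvmclock
 * code does. my_sched_clock() is a made-up name standing in for a
 * hypervisor-specific readout:
 *
 *	static u64 my_sched_clock(void)
 *	{
 *		return pvclock_clocksource_read(...);
 *	}
 *
 *	pv_ops.time.sched_clock = my_sched_clock;
 */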

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, time.steal_clock, cpu);
}
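
/*
 * Sketch of how the scheduler consumes this hook (the pattern in
 * kernel/sched/cputime.c): the static key keeps the indirect call off
 * the fast path unless a hypervisor enabled steal-time reporting:
 *
 *	if (static_key_false(&paravirt_steal_enabled))
 *		steal = paravirt_steal_clock(smp_processor_id());
 */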

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
#endif
}
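
/*
 * Note: the native cpu.io_delay hook is a dummy port write (typically
 * to port 0x80); a driver that defines REALLY_SLOW_IO before pulling
 * in this header quadruples the delay for legacy devices that need it.
 */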

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_others(const struct cpumask *cpumask,
				      const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}
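
/*
 * The native_flush_tlb_*() primitives declared above are the default
 * targets of these hooks; a hypervisor may replace them with batched
 * hypercalls. Minimal sketch of such an override (xen_flush_tlb_user()
 * is a hypothetical name here):
 *
 *	pv_ops.mmu.flush_tlb_user = xen_flush_tlb_user;
 */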

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}
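
/*
 * Usage sketch: callers preload the requested leaf (and subleaf) in
 * *eax and *ecx and read the results back from all four registers,
 * e.g. to fetch the feature-flag leaf:
 *
 *	unsigned int eax = 1, ebx = 0, ecx = 0, edx = 0;
 *
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 */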

/*
 * These accessors can be used to get or set a debug register.
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
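
/*
 * Typical usage, e.g. the #DB handler reads and then clears DR6:
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);
 *	set_debugreg(0, 6);
 */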

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(cpu.wbinvd);
}

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
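
/*
 * Example of the _safe variants, which survive the #GP raised by a
 * missing MSR and report it instead (MSR constant chosen purely for
 * illustration):
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_IA32_PLATFORM_ID, &val))
 *		pr_warn("MSR not readable\n");
 */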

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
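
/*
 * Sketch of a perf-style read of a hardware counter through the hook
 * (idx would come from the event's configured counter index):
 *
 *	u64 count;
 *
 *	rdpmcl(idx, count);
 */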

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif /* CONFIG_X86_IOPL_IOPERM */

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
}

static inline pteval_t pte_val(pte_t pte)
{
	return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
}

static inline pgd_t __pgd(pgdval_t val)
{
	return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}
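
/*
 * The start/commit pair above forms a transaction so a hypervisor can
 * batch the read-modify-write of a PTE. The caller pattern (as in
 * mm/mprotect.c) is:
 *
 *	oldpte = ptep_modify_prot_start(vma, addr, pte);
 *	ptent = pte_modify(oldpte, newprot);
 *	ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
 */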

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
}

static inline pmd_t __pmd(pmdval_t val)
{
	return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
}

static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, native_make_pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, native_make_pgd(0));			\
} while (0)
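
/*
 * When the CPU runs with only four paging levels, the pgd is folded
 * into the p4d: set_pgd() then writes through set_p4d() on the same
 * slot, and pgd_clear() becomes a no-op because p4d_clear() will do
 * the work instead.
 */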

#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, native_make_p4d(0));
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte(ptep, native_make_pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, native_make_pmd(0));
}

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}
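
/*
 * Usage sketch for lazy MMU mode: bracket a run of PTE updates so a
 * hypervisor can coalesce them into a single hypercall, as the generic
 * apply_to_page_range() path does:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, pte++)
 *		set_pte_at(mm, addr, pte, pte_mkwrite(*pte));
 *	arch_leave_lazy_mmu_mode();
 */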

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}
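
/*
 * Sketch of the protocol behind these hooks (see
 * kernel/locking/qspinlock_paravirt.h): a waiter that has spun too
 * long parks its vCPU, and the eventual unlocker wakes it:
 *
 *	waiter:		pv_wait(&node->state, vcpu_halted);
 *	unlocker:	pv_kick(node->cpu);
 *
 * On bare metal both hooks default to no-ops.
 */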

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)					\
	((struct paravirt_callee_save) { func })
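
/*
 * Putting the pieces together, roughly how a native callee-save hook
 * gets wired up (cf. the queued spinlock code):
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 *	...
 *	pv_ops.lock.queued_spin_unlock =
 *			PV_CALLEE_SAVE(__native_queued_spin_unlock);
 */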

#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(irq.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
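
/*
 * These back the generic local_irq_save()/local_irq_restore() pair,
 * so the usual pattern compiles down to patchable pv calls:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	...
 *	local_irq_restore(flags);
 */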
#endif /* CONFIG_PARAVIRT_XXL */

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	.popsection
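
/*
 * Each _PVSITE emits one patch-site record: a word-sized address of
 * the instruction, the op type, and the site length in bytes. The
 * layout must line up with struct paravirt_patch_site so that
 * apply_paravirt() can find and rewrite the site at boot.
 */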

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)		((off) / 8)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)		((off) / 4)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(PV_CPU_iret),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif /* CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),			\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)						\
	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),				\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif /* CONFIG_DEBUG_ENTRY */
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_PARAVIRT_XXL

#define GET_CR2_INTO_AX						\
	PARA_SITE(PARA_PATCH(PV_MMU_read_cr2),			\
		  ANNOTATE_RETPOLINE_SAFE;			\
		  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);	\
		  )

#endif /* CONFIG_PARAVIRT_XXL */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */