// SPDX-License-Identifier: GPL-2.0
#include <linux/hardirq.h>

#include <asm/x86_init.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/features.h>
#include <xen/events.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
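	/*
	 * Any hypercall will do; XENVER_version (cmd 0) is a cheap no-op
	 * query, used here purely to trap into the hypervisor.
	 */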
	(void)HYPERVISOR_xen_version(0, NULL);
}

asmlinkage __visible unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = this_cpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
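	/* X86_EFLAGS_IF is bit 9 (0x200), so the result is either 0 or X86_EFLAGS_IF. */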
	return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);

__visible void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* See xen_irq_enable() for why preemption must be disabled. */
	preempt_disable();
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;

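	/*
	 * flags == 0 means interrupts are being re-enabled: once the upcall
	 * is unmasked, check for events that arrived while it was masked.
	 */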
	if (flags == 0) {
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
		preempt_enable();
	} else
		preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);

asmlinkage __visible void xen_irq_disable(void)
{
	/* There's a one-instruction preempt window here. We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

asmlinkage __visible void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/*
	 * We may be preempted as soon as vcpu->evtchn_upcall_mask is
	 * cleared, so disable preemption to ensure we check for
	 * events on the VCPU we are still running on.
	 */
	preempt_disable();

	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();

	preempt_enable();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);

static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

static void xen_halt(void)
{
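	/*
	 * SCHEDOP_block implicitly re-enables event delivery (see
	 * xen_safe_halt()), so with interrupts disabled take the vCPU
	 * offline instead of blocking.
	 */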
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down,
				   xen_vcpu_nr(smp_processor_id()), NULL);
	else
		xen_safe_halt();
}

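/*
 * Paravirt replacements for the native IRQ-flag primitives. The
 * PV_CALLEE_SAVE thunks defined above let these be used at paravirt
 * call sites without the usual full call clobbers.
 */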
static const struct pv_irq_ops xen_irq_ops __initconst = {
	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
};

void __init xen_init_irq_ops(void)
{
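	/* Install the Xen irq ops and route IRQ setup through Xen's event channels. */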
	pv_ops.irq = xen_irq_ops;
	x86_init.irqs.intr_init = xen_init_IRQ;
}