/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _XEN_EVENTS_H
#define _XEN_EVENTS_H

#include <linux/interrupt.h>
#include <linux/irq.h>
#ifdef CONFIG_PCI_MSI
#include <linux/msi.h>
#endif

#include <xen/interface/event_channel.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/events.h>

unsigned xen_evtchn_nr_channels(void);

int bind_evtchn_to_irq(evtchn_port_t evtchn);
int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn);
int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags, const char *devname,
			      void *dev_id);
int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
				      irq_handler_t handler,
				      unsigned long irqflags, const char *devname,
				      void *dev_id);
int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname,
			    void *dev_id);
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id);
int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
					   evtchn_port_t remote_port);
int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
						  evtchn_port_t remote_port,
						  irq_handler_t handler,
						  unsigned long irqflags,
						  const char *devname,
						  void *dev_id);
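
/*
 * Illustrative sketch (not part of this header): a backend typically binds
 * the event channel offered by a frontend domain with one of the lateeoi
 * variants, so a misbehaving guest cannot trigger an interrupt storm. The
 * handler and variable names below are hypothetical placeholders.
 *
 *	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(frontend_domid,
 *							    remote_port,
 *							    example_backend_irq,
 *							    0, "example-backend",
 *							    backend_info);
 *	if (err < 0)
 *		return err;
 *	backend_info->irq = err;	// a value >= 0 is the Linux irq number
 */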

/*
 * Common unbind function for all event sources. Takes IRQ to unbind from.
 * Automatically closes the underlying event channel (even for bindings
 * made with bind_evtchn_to_irqhandler()).
 */
void unbind_from_irqhandler(unsigned int irq, void *dev_id);
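
/*
 * Illustrative sketch (not part of this header): drivers usually pair one of
 * the bind_*_to_irqhandler() calls above with unbind_from_irqhandler() on
 * teardown; the unbind also closes the event channel. The handler, evtchn
 * and device pointer below are hypothetical placeholders.
 *
 *	static irqreturn_t example_interrupt(int irq, void *dev_id)
 *	{
 *		// handle the event for dev_id
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, example_interrupt, 0,
 *					"example-dev", dev);
 *	if (irq < 0)
 *		return irq;
 *	// device operates; events are delivered to example_interrupt()
 *	unbind_from_irqhandler(irq, dev);
 */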

/*
 * Send late EOI for an IRQ bound to an event channel via one of the *_lateeoi
 * functions above.
 */
void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags);
/* Signal an event was spurious, i.e. there was no action resulting from it. */
#define XEN_EOI_FLAG_SPURIOUS	0x00000001
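
/*
 * Illustrative sketch (not part of this header): with a *_lateeoi binding,
 * further events on the channel are held off until the driver signals that
 * the current one has been handled. A handler typically reports whether the
 * event turned out to be spurious; the helper below is hypothetical.
 *
 *	static irqreturn_t example_backend_irq(int irq, void *dev_id)
 *	{
 *		unsigned int eoi_flags = 0;
 *
 *		if (!example_do_work(dev_id))	// no work found for this event
 *			eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
 *		xen_irq_lateeoi(irq, eoi_flags);
 *		return IRQ_HANDLED;
 *	}
 */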

#define XEN_IRQ_PRIORITY_MAX		EVTCHN_FIFO_PRIORITY_MAX
#define XEN_IRQ_PRIORITY_DEFAULT	EVTCHN_FIFO_PRIORITY_DEFAULT
#define XEN_IRQ_PRIORITY_MIN		EVTCHN_FIFO_PRIORITY_MIN
int xen_set_irq_priority(unsigned irq, unsigned priority);
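
/*
 * Illustrative sketch (not part of this header): event priorities are only
 * meaningful when the FIFO event channel ABI is in use, so callers should
 * normally treat a failure here as non-fatal. The irq below is assumed to
 * come from one of the bind_*() calls above.
 *
 *	if (xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX))
 *		pr_info("event priority not supported, using default\n");
 */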

/*
 * Allow extra references to event channels exposed to userspace by the
 * evtchn driver.
 */
int evtchn_make_refcounted(evtchn_port_t evtchn);
int evtchn_get(evtchn_port_t evtchn);
void evtchn_put(evtchn_port_t evtchn);
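
/*
 * Illustrative sketch (not part of this header): kernel code that hands a
 * userspace-owned event channel to another subsystem pins it with
 * evtchn_get() and releases it with evtchn_put() when done; the helper in
 * the middle is hypothetical.
 *
 *	if (evtchn_get(evtchn))
 *		return -ENOENT;		// port closed or not refcounted
 *	example_use_port(evtchn);
 *	evtchn_put(evtchn);
 */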

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq);
int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);

static inline void notify_remote_via_evtchn(evtchn_port_t port)
{
	struct evtchn_send send = { .port = port };
	(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}

void notify_remote_via_irq(int irq);
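
/*
 * Illustrative sketch (not part of this header): after queuing requests or
 * responses on a shared ring, one side kicks the other via the bound event
 * channel. Either the raw port or the Linux irq can be used; the ring
 * variable below is hypothetical.
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&example_front_ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 */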

void xen_irq_resume(void);

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq);
void xen_set_irq_pending(int irq);
bool xen_test_irq_pending(int irq);

/* Poll waiting for an irq to become pending. In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq);

/* Poll waiting for an irq to become pending with a timeout. In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout);
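
/*
 * Illustrative sketch (not part of this header): the clear/poll helpers allow
 * busy-wait style blocking on an event channel without taking an interrupt,
 * e.g. while waiting for another vCPU to kick us. The condition below is a
 * hypothetical placeholder.
 *
 *	xen_clear_irq_pending(irq);		// discard any stale kick
 *	while (!example_condition_ready(dev)) {
 *		xen_poll_irq(irq);		// returns once the evtchn fires
 *		xen_clear_irq_pending(irq);
 *	}
 */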

/* Determine the IRQ which is bound to an event channel */
unsigned int irq_from_evtchn(evtchn_port_t evtchn);
int irq_from_virq(unsigned int cpu, unsigned int virq);
evtchn_port_t evtchn_from_irq(unsigned irq);

int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs);
void xen_hvm_evtchn_do_upcall(void);

/* Bind a pirq for a physical interrupt to an irq. */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name);

#ifdef CONFIG_PCI_MSI
/* Allocate a pirq for an MSI style physical interrupt. */
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
/* Bind an MSI pirq to an irq. */
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid);
#endif

/* De-allocates the above mentioned physical interrupt. */
int xen_destroy_irq(int irq);

/* Return irq from pirq */
int xen_irq_from_pirq(unsigned pirq);

/* Return the pirq allocated to the irq. */
int xen_pirq_from_irq(unsigned irq);

/* Return the irq allocated to the gsi */
int xen_irq_from_gsi(unsigned gsi);

/* Determine whether to ignore this IRQ if it is passed to a guest. */
int xen_test_irq_shared(int irq);

/* initialize Xen IRQ subsystem */
void xen_init_IRQ(void);
#endif /* _XEN_EVENTS_H */