Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/atomic.h>
#include <linux/ktime.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/idtentry.h>
#include <asm/irq.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	struct list_head eoi_list;
	short refcnt;
	short spurious_cnt;
	short type;             /* type */
	u8 mask_reason;         /* Why is event channel masked */
#define EVT_MASK_REASON_EXPLICIT	0x01
#define EVT_MASK_REASON_TEMPORARY	0x02
#define EVT_MASK_REASON_EOI_PENDING	0x04
	u8 is_active;		/* Is event just being handled? */
	unsigned irq;
	evtchn_port_t evtchn;   /* event channel */
	unsigned short cpu;     /* cpu bound */
	unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
	unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
	u64 eoi_time;           /* Time in jiffies when to EOI. */
	raw_spinlock_t lock;

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};

#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
#define PIRQ_MSI_GROUP	(1 << 2)

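/*
 * Tunables for the delayed-EOI event storm protection below (exposed as
 * xen.event_loop_timeout and xen.event_eoi_delay, see MODULE_PARAM_PREFIX
 * above; both in jiffies). Roughly: once one event handling loop has been
 * running longer than event_loop_timeout, further EOIs are deferred by at
 * least event_eoi_delay. The loop itself is not part of this excerpt.
 */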
static uint __read_mostly event_loop_timeout = 2;
module_param(event_loop_timeout, uint, 0644);

static uint __read_mostly event_eoi_delay = 10;
module_param(event_eoi_delay, uint, 0644);

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

/*
 * Lock protecting event handling loop against removing event channels.
 * Adding of event channels is no issue as the associated IRQ becomes active
 * only after everything is setup (before request_[threaded_]irq() the handler
 * can't be entered for an event, as the event channel will be unmasked only
 * then).
 */
static DEFINE_RWLOCK(evtchn_rwlock);

/*
 * Lock hierarchy:
 *
 * irq_mapping_update_lock
 *   evtchn_rwlock
 *     IRQ-desc lock
 *       percpu eoi_list_lock
 *         irq_info->lock
 */

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

static int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

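/*
 * The evtchn -> irq mapping is a two-level table: evtchn_to_irq is an
 * array of row pointers, each row being one page of ints allocated on
 * demand in set_evtchn_to_irq(). With 4 KiB pages and 4-byte ints this
 * gives EVTCHN_PER_ROW == 1024 entries per row.
 */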
#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_lateeoi_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static DEFINE_PER_CPU(unsigned int, irq_epoch);

static void clear_evtchn_to_irq_row(int *evtchn_row)
{
	unsigned col;

	for (col = 0; col < EVTCHN_PER_ROW; col++)
		WRITE_ONCE(evtchn_row[col], -1);
}

static void clear_evtchn_to_irq_all(void)
{
	unsigned row;

	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
		if (evtchn_to_irq[row] == NULL)
			continue;
		clear_evtchn_to_irq_row(evtchn_to_irq[row]);
	}
}

static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
{
	unsigned row;
	unsigned col;
	int *evtchn_row;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	row = EVTCHN_ROW(evtchn);
	col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;

		evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
		if (evtchn_row == NULL)
			return -ENOMEM;

		clear_evtchn_to_irq_row(evtchn_row);

		/*
		 * We've prepared an empty row for the mapping. If a different
		 * thread was faster inserting it, we can drop ours.
		 */
		if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
			free_page((unsigned long) evtchn_row);
	}

	WRITE_ONCE(evtchn_to_irq[row][col], irq);
	return 0;
}

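/*
 * Lockless read of the mapping; pairs with the WRITE_ONCE()s in
 * set_evtchn_to_irq() and clear_evtchn_to_irq_row().
 */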
int get_evtchn_to_irq(evtchn_port_t evtchn)
{
	if (evtchn >= xen_evtchn_max_channels())
		return -1;
	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
		return -1;
	return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
}

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
	if (irq < nr_legacy_irqs())
		return legacy_info_ptrs[irq];
	else
		return irq_get_chip_data(irq);
}

static void set_info_for_irq(unsigned int irq, struct irq_info *info)
{
	if (irq < nr_legacy_irqs())
		legacy_info_ptrs[irq] = info;
	else
		irq_set_chip_data(irq, info);
}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     evtchn_port_t evtchn,
				     unsigned short cpu)
{
	int ret;

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;
	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
	raw_spin_lock_init(&info->lock);

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
		return ret;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

	return xen_evtchn_port_setup(evtchn);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
				     evtchn_port_t evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  evtchn_port_t evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   evtchn_port_t evtchn,
				   unsigned virq)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}

static int xen_irq_info_pirq_setup(unsigned irq,
				   evtchn_port_t evtchn,
				   unsigned pirq,
				   unsigned gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	xen_evtchn_port_remove(info->evtchn, info->cpu);
	info->evtchn = 0;
}

/*
 * Accessors for packed IRQ information.
 */
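/*
 * Returns the event channel bound to @irq, or 0 (never a valid port,
 * see VALID_EVTCHN()) if the irq is out of range or has no irq_info.
 */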
evtchn_port_t evtchn_from_irq(unsigned irq)
{
	const struct irq_info *info = NULL;

	if (likely(irq < nr_irqs))
		info = info_for_irq(irq);
	if (!info)
		return 0;

	return info->evtchn;
}

unsigned int irq_from_evtchn(evtchn_port_t evtchn)
{
	return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

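/*
 * Masking is reference-counted per reason (EVT_MASK_REASON_*): the event
 * channel is masked when the first reason bit is set and unmasked again
 * only when the last reason bit is cleared.
 */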
static void do_mask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&info->lock, flags);

	if (!info->mask_reason)
		mask_evtchn(info->evtchn);

	info->mask_reason |= reason;

	raw_spin_unlock_irqrestore(&info->lock, flags);
}

static void do_unmask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&info->lock, flags);

	info->mask_reason &= ~reason;

	if (!info->mask_reason)
		unmask_evtchn(info->evtchn);

	raw_spin_unlock_irqrestore(&info->lock, flags);
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
#endif
	xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);

	info->cpu = cpu;
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

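/*
 * Late-EOI handling: drivers using the lateeoi irq chip signal completion
 * via xen_irq_lateeoi(). EOIs which must be delayed (e.g. after spurious
 * events) are queued on a per-cpu list, ordered by due time, and issued
 * from delayed work.
 */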
struct lateeoi_work {
	struct delayed_work delayed;
	spinlock_t eoi_list_lock;
	struct list_head eoi_list;
};

static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);

static void lateeoi_list_del(struct irq_info *info)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
	unsigned long flags;

	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
	list_del_init(&info->eoi_list);
	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
}

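/*
 * Queue @info on the per-cpu list, kept sorted by ascending eoi_time.
 * The delayed work is armed here only when the list was empty; otherwise
 * the worker re-arms itself based on the head entry.
 */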
static void lateeoi_list_add(struct irq_info *info)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
	struct irq_info *elem;
	u64 now = get_jiffies_64();
	unsigned long delay;
	unsigned long flags;

	if (now < info->eoi_time)
		delay = info->eoi_time - now;
	else
		delay = 1;

	spin_lock_irqsave(&eoi->eoi_list_lock, flags);

	if (list_empty(&eoi->eoi_list)) {
		list_add(&info->eoi_list, &eoi->eoi_list);
		mod_delayed_work_on(info->eoi_cpu, system_wq,
				    &eoi->delayed, delay);
	} else {
		list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
			if (elem->eoi_time <= info->eoi_time)
				break;
		}
		list_add(&info->eoi_list, &elem->eoi_list);
	}

	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
}

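/*
 * EOI an event channel. For spurious events the EOI is delayed with an
 * exponential backoff (1 << (spurious_cnt - 2) jiffies, capped at HZ) to
 * throttle misbehaving event sources.
 */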
static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
{
	evtchn_port_t evtchn;
	unsigned int cpu;
	unsigned int delay = 0;

	evtchn = info->evtchn;
	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
		return;

	if (spurious) {
		if ((1 << info->spurious_cnt) < (HZ << 2))
			info->spurious_cnt++;
		if (info->spurious_cnt > 1) {
			delay = 1 << (info->spurious_cnt - 2);
			if (delay > HZ)
				delay = HZ;
			if (!info->eoi_time)
				info->eoi_cpu = smp_processor_id();
			info->eoi_time = get_jiffies_64() + delay;
		}
	} else {
		info->spurious_cnt = 0;
	}

	cpu = info->eoi_cpu;
	if (info->eoi_time &&
	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
		lateeoi_list_add(info);
		return;
	}

	info->eoi_time = 0;

	/* is_active hasn't been reset yet, do it now. */
	smp_store_release(&info->is_active, 0);
	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
}

static void xen_irq_lateeoi_worker(struct work_struct *work)
{
	struct lateeoi_work *eoi;
	struct irq_info *info;
	u64 now = get_jiffies_64();
	unsigned long flags;

	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);

	read_lock_irqsave(&evtchn_rwlock, flags);

	while (true) {
		spin_lock(&eoi->eoi_list_lock);

		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
						eoi_list);

		if (info == NULL || now < info->eoi_time) {
			spin_unlock(&eoi->eoi_list_lock);
			break;
		}

		list_del_init(&info->eoi_list);

		spin_unlock(&eoi->eoi_list_lock);

		info->eoi_time = 0;

		xen_irq_lateeoi_locked(info, false);
	}

	if (info)
		mod_delayed_work_on(info->eoi_cpu, system_wq,
				    &eoi->delayed, info->eoi_time - now);

	read_unlock_irqrestore(&evtchn_rwlock, flags);
}

static void xen_cpu_init_eoi(unsigned int cpu)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);

	INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
	spin_lock_init(&eoi->eoi_list_lock);
	INIT_LIST_HEAD(&eoi->eoi_list);
}

void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
	struct irq_info *info;
	unsigned long flags;

	read_lock_irqsave(&evtchn_rwlock, flags);

	info = info_for_irq(irq);

	if (info)
		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);

	read_unlock_irqrestore(&evtchn_rwlock, flags);
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;

#ifdef CONFIG_SMP
	/* By default all event channels notify CPU#0. */
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	set_info_for_irq(irq, info);

	INIT_LIST_HEAD(&info->eoi_list);
	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);

	if (irq >= 0) {
		for (i = 0; i < nvec; i++)
			xen_irq_init(irq + i);
	}

	return irq;
}

static inline int __must_check xen_allocate_irq_dynamic(void)
{
	return xen_allocate_irqs_dynamic(1);
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < nr_legacy_irqs())
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

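/*
 * Freeing takes evtchn_rwlock as writer to make sure no event handling
 * loop (a reader, see the comment at evtchn_rwlock above) still
 * references the irq_info being removed.
 */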
static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	unsigned long flags;

	if (WARN_ON(!info))
		return;

	write_lock_irqsave(&evtchn_rwlock, flags);

	if (!list_empty(&info->eoi_list))
		lateeoi_list_del(info);

	list_del(&info->list);

	set_info_for_irq(irq, NULL);

	WARN_ON(info->refcnt > 0);

	write_unlock_irqrestore(&evtchn_rwlock, flags);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < nr_legacy_irqs())
		return;

	irq_free_desc(irq);
}

static void xen_evtchn_close(evtchn_port_t port)
{
	struct evtchn_close close;

	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
}

static void event_handler_exit(struct irq_info *info)
{
	smp_store_release(&info->is_active, 0);
	clear_evtchn(info->evtchn);
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static void eoi_pirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	if (!VALID_EVTCHN(evtchn))
		return;

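	/*
	 * If an affinity change is pending, perform it now: the channel is
	 * masked temporarily so the irq can be moved safely while no new
	 * events arrive, then unmasked again.
	 */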
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if (unlikely(irqd_is_setaffinity_pending(data)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	    likely(!irqd_irq_disabled(data))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		event_handler_exit(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		irq_move_masked_irq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		event_handler_exit(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	if (pirq_needs_eoi(data->irq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		WARN_ON(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		pr_warn("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	rc = set_evtchn_to_irq(evtchn, irq);
	if (rc)
		goto err;

	info->evtchn = evtchn;
	bind_evtchn_to_cpu(evtchn, 0);

	rc = xen_evtchn_port_setup(evtchn);
	if (rc)
		goto err;

out:
	do_unmask(info, EVT_MASK_REASON_EXPLICIT);

	eoi_pirq(irq_get_irq_data(irq));

	return 0;

err:
	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
	xen_evtchn_close(evtchn);
	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	do_mask(info, EVT_MASK_REASON_EXPLICIT);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
	enable_dynirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

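/*
 * Example (editorial sketch, not part of the original file): callers must
 * check for -1, not for a negative errno; "gsi" is a hypothetical value
 * from ACPI parsing.
 *
 *	if (xen_irq_from_gsi(gsi) != -1)
 *		return -EBUSY;
 */
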
static void __unbind_from_irq(unsigned int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);
	struct irq_info *info = info_for_irq(irq);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = cpu_from_irq(irq);

		xen_evtchn_close(evtchn);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		xen_irq_info_cleanup(info);
	}

	xen_free_irq(irq);
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq is actually
 * started up.  Return an existing irq if we've already got one for the
 * gsi.
 *
 * Shareable implies level triggered; not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For unprivileged
	 * domains, the pcifront driver provides a PCI bus which makes
	 * this same call on their behalf in the privileged domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);
	if (ret < 0) {
		__unbind_from_irq(irq);
		irq = ret;
		goto out;
	}

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantics for
	 * the type of interrupt: if the interrupt is edge triggered we
	 * use handle_edge_irq.
	 *
	 * On the other hand, if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupt.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

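/*
 * Example (editorial sketch, not part of the original file): binding a
 * level-triggered GSI with shareable = 1 selects the fasteoi flow
 * discussed above; the GSI/PIRQ values and the name are hypothetical.
 *
 *	irq = xen_bind_pirq_gsi_to_irq(9, 9, 1, "acpi-sci");
 *	if (irq < 0)
 *		pr_err("GSI 9 setup failed (%d)\n", irq);
 */
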
#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid)
{
	int i, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irqs_dynamic(nvec);
	if (irq < 0)
		goto out;

	for (i = 0; i < nvec; i++) {
		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip,
					      handle_edge_irq, name);

		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
					      i == 0 ? 0 : PIRQ_MSI_GROUP);
		if (ret < 0)
			goto error_irq;
	}

	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	while (nvec--)
		__unbind_from_irq(irq + nvec);
	mutex_unlock(&irq_mapping_update_lock);
	return ret;
}
#endif

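/*
 * Example (editorial sketch, not part of the original file): the Xen PCI
 * arch code chains the two helpers above roughly as follows; dev and
 * msidesc come from the PCI/MSI core.
 *
 *	int pirq = xen_allocate_pirq_msi(dev, msidesc);
 *
 *	if (pirq < 0)
 *		return pirq;
 *	irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 1,
 *				       "msi", DOMID_SELF);
 */
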
int xen_destroy_irq(int irq)
{
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	/*
	 * When removing a vector of an MSI group other than the first
	 * one, skip the PIRQ unmap: only the first vector in the group
	 * carries the PIRQ mapping.
	 */
	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if (rc == -ESRCH && info->u.pirq.domid != DOMID_SELF) {
			pr_info("domain %d does not have pirq %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		} else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;
	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
{
	int irq;
	int ret;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, chip,
					      handle_edge_irq, "event");

		ret = xen_irq_info_evtchn_setup(irq, evtchn);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/* New interdomain events are bound to VCPU 0. */
		bind_evtchn_to_cpu(evtchn, 0);
	} else {
		struct irq_info *info = info_for_irq(irq);

		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int bind_evtchn_to_irq(evtchn_port_t evtchn)
{
	return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
{
	return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	evtchn_port_t evtchn;
	int ret, irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);

		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain,
					       evtchn_port_t remote_port,
					       struct irq_chip *chip)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
					       chip);
}

int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
					   evtchn_port_t remote_port)
{
	return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
						   &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);

static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
{
	struct evtchn_status status;
	evtchn_port_t port;
	int rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port < xen_evtchn_max_channels(); port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
			*evtchn = port;
			break;
		}
	}
	return rc;
}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported.
 */
unsigned xen_evtchn_nr_channels(void)
{
	return evtchn_ops->nr_channels();
}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

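/*
 * Example (editorial sketch, not part of the original file): validating a
 * user-supplied port against both limits.  A port at or above
 * xen_evtchn_max_channels() can never be valid under the current ABI,
 * while one between the two limits is merely not usable right now; the
 * error codes chosen here are illustrative.
 *
 *	if (port >= xen_evtchn_max_channels())
 *		return -EINVAL;
 *	if (port >= xen_evtchn_nr_channels())
 *		return -ENOENT;
 */
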
int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
	struct evtchn_bind_virq bind_virq;
	evtchn_port_t evtchn = 0;
	int irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		if (percpu)
			irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
						      handle_percpu_irq, "virq");
		else
			irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
						      handle_edge_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = xen_vcpu_nr(cpu);
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						  &bind_virq);
		if (ret == 0) {
			evtchn = bind_virq.port;
		} else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu, &evtchn);
			BUG_ON(ret < 0);
		}

		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);

		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
}

static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname, void *dev_id,
					  struct irq_chip *chip)
{
	int irq, retval;

	irq = bind_evtchn_to_irq_chip(evtchn, chip);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
					      devname, dev_id,
					      &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

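/*
 * Example (editorial sketch, not part of the original file): a frontend
 * driver binding an event channel it obtained through xenbus; my_handler,
 * struct my_device and the wait queue are hypothetical.
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		struct my_device *my_dev = dev_id;
 *
 *		wake_up(&my_dev->wq);
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */
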
int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
				      irq_handler_t handler,
				      unsigned long irqflags,
				      const char *devname, void *dev_id)
{
	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
					      devname, dev_id,
					      &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);

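/*
 * Example (editorial sketch, not part of the original file): with the
 * lateeoi chip no further events are delivered until the handler signals
 * completion via xen_irq_lateeoi() (defined earlier in this file).
 * process_requests() is a hypothetical helper; passing
 * XEN_EOI_FLAG_SPURIOUS when no work was found lets the core throttle
 * misbehaving event sources.
 *
 *	static irqreturn_t my_backend_handler(int irq, void *dev_id)
 *	{
 *		bool did_work = process_requests(dev_id);
 *
 *		xen_irq_lateeoi(irq, did_work ? 0 : XEN_EOI_FLAG_SPURIOUS);
 *		return IRQ_HANDLED;
 *	}
 */
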
static int bind_interdomain_evtchn_to_irqhandler_chip(
		unsigned int remote_domain, evtchn_port_t remote_port,
		irq_handler_t handler, unsigned long irqflags,
		const char *devname, void *dev_id, struct irq_chip *chip)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
						  chip);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
						  evtchn_port_t remote_port,
						  irq_handler_t handler,
						  unsigned long irqflags,
						  const char *devname,
						  void *dev_id)
{
	return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
				remote_port, handler, irqflags, devname,
				dev_id, &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname,
			    void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

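/*
 * Example (editorial sketch, not part of the original file): binding the
 * per-cpu debug VIRQ on one CPU; debug_interrupt stands in for the real
 * handler.  IRQF_PERCPU makes bind_virq_to_irq() pick the percpu chip.
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, debug_interrupt,
 *				      IRQF_PERCPU | IRQF_NOBALANCING,
 *				      "debug", NULL);
 */
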
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq: irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
	struct evtchn_set_priority set_priority;

	set_priority.port = evtchn_from_irq(irq);
	set_priority.priority = priority;

	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
					   &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);

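/*
 * Example (editorial sketch, not part of the original file): raising the
 * priority of a previously bound event channel so it is delivered ahead
 * of bulk traffic.  Note that only the FIFO event channel ABI implements
 * priorities; on other ABIs the hypercall fails.
 *
 *	ret = xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
 *	if (ret)
 *		pr_warn("could not set event priority (%d)\n", ret);
 */
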
int evtchn_make_refcounted(evtchn_port_t evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = info_for_irq(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(evtchn_port_t evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);
	if (irq == -1)
		goto done;

	info = info_for_irq(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
		goto done;

	info->refcnt++;
	err = 0;
done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(evtchn_port_t evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);

	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);

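/*
 * Example (editorial sketch, not part of the original file): the refcount
 * lifecycle.  A channel starts out with refcnt == -1 (not refcounted);
 * evtchn_make_refcounted() switches it to 1, after which evtchn_get() and
 * evtchn_put() must be balanced.  Dropping the last reference unbinds the
 * port.
 *
 *	err = evtchn_make_refcounted(evtchn);
 *	if (err)
 *		return err;
 *
 *	if (!evtchn_get(evtchn)) {
 *		...
 *		evtchn_put(evtchn);
 *	}
 */
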
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
					    NULL);

		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

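/*
 * Example (editorial sketch, not part of the original file): the Xen SMP
 * code kicks another CPU with
 *
 *	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 */
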
struct evtchn_loop_ctrl {
	ktime_t timeout;
	unsigned count;
	bool defer_eoi;
};

void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
{
	int irq;
	struct irq_info *info;

	irq = get_evtchn_to_irq(port);
	if (irq == -1)
		return;

	/*
	 * Check for timeout every 256 events.
	 * We are only setting the timeout value after the first 256
	 * events in order not to hurt the common case of few loop
	 * iterations. The 256 is basically an arbitrary value.
	 *
	 * If we are hitting the timeout we need to defer all further
	 * EOIs in order to ensure that we leave the event handling loop
	 * sooner rather than later.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		ktime_t kt = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		if (!ctrl->timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 			kt = ktime_add_ms(kt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 					  jiffies_to_msecs(event_loop_timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 			ctrl->timeout = kt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		} else if (kt > ctrl->timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 			ctrl->defer_eoi = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	info = info_for_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	if (xchg_acquire(&info->is_active, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	if (ctrl->defer_eoi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		info->eoi_cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		info->irq_epoch = __this_cpu_read(irq_epoch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		info->eoi_time = get_jiffies_64() + event_eoi_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	generic_handle_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
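/*
 * Illustrative sketch (not part of the original file): the "++ctrl->count &
 * 0xff" test above is the standard power-of-two trick for doing periodic
 * work cheaply; the expression is zero exactly once every 256 increments:
 *
 *	unsigned int count = 0;
 *
 *	while (more_events()) {			// hypothetical helper
 *		handle_one_event();		// hypothetical helper
 *		if (!(++count & 0xff))		// true on every 256th event
 *			check_deadline();	// hypothetical helper
 *	}
 */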
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static void __xen_evtchn_do_upcall(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	struct evtchn_loop_ctrl ctrl = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	read_lock(&evtchn_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		vcpu_info->evtchn_upcall_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		xen_evtchn_handle_events(cpu, &ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		BUG_ON(!irqs_disabled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		virt_rmb(); /* Hypervisor can set upcall pending. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	} while (vcpu_info->evtchn_upcall_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	read_unlock(&evtchn_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	 * Increment irq_epoch only now to defer EOIs only for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	 * xen_irq_lateeoi() invocations occurring from inside the loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	 * above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	__this_cpu_inc(irq_epoch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) void xen_evtchn_do_upcall(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	struct pt_regs *old_regs = set_irq_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	irq_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	__xen_evtchn_do_upcall();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	irq_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	set_irq_regs(old_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) void xen_hvm_evtchn_do_upcall(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	__xen_evtchn_do_upcall();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
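/*
 * Context note (based on the x86 Xen code, not spelled out in this file):
 * xen_evtchn_do_upcall() is the entry point for PV guests, invoked from the
 * hypervisor callback with a pt_regs snapshot, while
 * xen_hvm_evtchn_do_upcall() is called from the HVM callback vector handler
 * (sysvec_xen_hvm_callback on x86) that is installed further below by
 * xen_alloc_callback_vector().
 */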
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /* Rebind a new event channel to an existing irq. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	struct irq_info *info = info_for_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	if (WARN_ON(!info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	 * Make sure the irq is masked, since the new event channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	 * will also be masked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	disable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	mutex_lock(&irq_mapping_update_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	/* After resume the irq<->evtchn mappings are all cleared out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	 * Expect irq to have been bound before,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	 * so there should be a proper type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	BUG_ON(info->type == IRQT_UNBOUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	(void)xen_irq_info_evtchn_setup(irq, evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	mutex_unlock(&irq_mapping_update_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	bind_evtchn_to_cpu(evtchn, info->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	/* This will be deferred until interrupt is processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	irq_set_affinity(irq, cpumask_of(info->cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	/* Unmask the event channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	enable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
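/*
 * Usage note (taken from the xenbus resume path, an assumption as far as
 * this file is concerned): xenbus reattaches the xenstore event channel to
 * its existing irq on resume, roughly:
 *
 *	if (xenbus_irq)
 *		rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
 */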
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) /* Rebind an evtchn so that it gets delivered to a specific cpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	struct evtchn_bind_vcpu bind_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	evtchn_port_t evtchn = info ? info->evtchn : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	if (!VALID_EVTCHN(evtchn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	if (!xen_support_evtchn_rebind())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	/* Send future instances of this interrupt to other vcpu. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	bind_vcpu.port = evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	 * Mask the event while changing the VCPU binding to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	 * it being delivered on an unexpected VCPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	do_mask(info, EVT_MASK_REASON_TEMPORARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	 * If this fails, it usually just indicates that we're dealing with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	 * virq or IPI channel, which don't actually need to be rebound. Ignore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	 * it, but don't do the xenlinux-level rebind in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		bind_evtchn_to_cpu(evtchn, tcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 			    bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	unsigned int tcpu = cpumask_first_and(dest, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	int ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		irq_data_update_effective_affinity(data, cpumask_of(tcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /* To be called with desc->lock held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	struct irq_data *d = irq_desc_get_irq_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	return set_affinity_irq(d, cpumask_of(tcpu), false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
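/*
 * Usage sketch (illustrative; the descriptor locking is the point): callers
 * must hold desc->lock, e.g.:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&desc->lock, flags);
 *	xen_set_affinity_evtchn(desc, tcpu);
 *	raw_spin_unlock_irqrestore(&desc->lock, flags);
 */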
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) static void enable_dynirq(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	struct irq_info *info = info_for_irq(data->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	evtchn_port_t evtchn = info ? info->evtchn : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	if (VALID_EVTCHN(evtchn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		do_unmask(info, EVT_MASK_REASON_EXPLICIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) static void disable_dynirq(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	struct irq_info *info = info_for_irq(data->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	evtchn_port_t evtchn = info ? info->evtchn : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	if (VALID_EVTCHN(evtchn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		do_mask(info, EVT_MASK_REASON_EXPLICIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static void ack_dynirq(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	struct irq_info *info = info_for_irq(data->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	evtchn_port_t evtchn = info ? info->evtchn : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	if (!VALID_EVTCHN(evtchn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	if (unlikely(irqd_is_setaffinity_pending(data)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	    likely(!irqd_irq_disabled(data))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		event_handler_exit(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		irq_move_masked_irq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		event_handler_exit(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) static void mask_ack_dynirq(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	disable_dynirq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	ack_dynirq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) static void lateeoi_ack_dynirq(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	struct irq_info *info = info_for_irq(data->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	evtchn_port_t evtchn = info ? info->evtchn : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	if (!VALID_EVTCHN(evtchn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	do_mask(info, EVT_MASK_REASON_EOI_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	if (unlikely(irqd_is_setaffinity_pending(data)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	    likely(!irqd_irq_disabled(data))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		clear_evtchn(evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		irq_move_masked_irq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		clear_evtchn(evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) static void lateeoi_mask_ack_dynirq(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	struct irq_info *info = info_for_irq(data->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	evtchn_port_t evtchn = info ? info->evtchn : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	if (VALID_EVTCHN(evtchn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		do_mask(info, EVT_MASK_REASON_EXPLICIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		ack_dynirq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) static int retrigger_dynirq(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	struct irq_info *info = info_for_irq(data->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	evtchn_port_t evtchn = info ? info->evtchn : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	if (!VALID_EVTCHN(evtchn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	do_mask(info, EVT_MASK_REASON_TEMPORARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	set_evtchn(evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
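/*
 * Context note (generic irq-core behaviour, not specific to this file):
 * .irq_retrigger is reached via check_irq_resend() when a pending interrupt
 * has to be replayed.  Setting the event pending while the channel is
 * temporarily masked ensures the final unmask above redelivers it.
 */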
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) static void restore_pirqs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	int pirq, rc, irq, gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	struct physdev_map_pirq map_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	struct irq_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	list_for_each_entry(info, &xen_irq_list_head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		if (info->type != IRQT_PIRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		pirq = info->u.pirq.pirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		gsi = info->u.pirq.gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		irq = info->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		 * Save/restore of PT devices doesn't work, so at this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		 * point the only devices present are GSI-based emulated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		 * devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		if (!gsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		map_irq.domid = DOMID_SELF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		map_irq.type = MAP_PIRQ_TYPE_GSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		map_irq.index = gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		map_irq.pirq = pirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 				gsi, irq, pirq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 			xen_free_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		__startup_pirq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static void restore_cpu_virqs(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	struct evtchn_bind_virq bind_virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	evtchn_port_t evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	int virq, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	for (virq = 0; virq < NR_VIRQS; virq++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		irq = per_cpu(virq_to_irq, cpu)[virq];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		if (irq == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		BUG_ON(virq_from_irq(irq) != virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		/* Get a new binding from Xen. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		bind_virq.virq = virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		bind_virq.vcpu = xen_vcpu_nr(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 						&bind_virq) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		evtchn = bind_virq.port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		/* Record the new mapping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		bind_evtchn_to_cpu(evtchn, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) static void restore_cpu_ipis(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	struct evtchn_bind_ipi bind_ipi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	evtchn_port_t evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	int ipi, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		irq = per_cpu(ipi_to_irq, cpu)[ipi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		if (irq == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		BUG_ON(ipi_from_irq(irq) != ipi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		/* Get a new binding from Xen. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 		bind_ipi.vcpu = xen_vcpu_nr(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 						&bind_ipi) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 			BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		evtchn = bind_ipi.port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		/* Record the new mapping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		bind_evtchn_to_cpu(evtchn, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) /* Clear an irq's pending state, in preparation for polling on it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) void xen_clear_irq_pending(int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	struct irq_info *info = info_for_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	evtchn_port_t evtchn = info ? info->evtchn : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	if (VALID_EVTCHN(evtchn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		event_handler_exit(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) EXPORT_SYMBOL(xen_clear_irq_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) /* Set an irq's pending state, as if its event had just been raised. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) void xen_set_irq_pending(int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	evtchn_port_t evtchn = evtchn_from_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	if (VALID_EVTCHN(evtchn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		set_evtchn(evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) bool xen_test_irq_pending(int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	evtchn_port_t evtchn = evtchn_from_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	if (VALID_EVTCHN(evtchn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		ret = test_evtchn(evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)  * Poll waiting for an irq to become pending with timeout.  In the usual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)  * case, the irq will be disabled so it won't deliver an interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) void xen_poll_irq_timeout(int irq, u64 timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	evtchn_port_t evtchn = evtchn_from_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	if (VALID_EVTCHN(evtchn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		struct sched_poll poll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		poll.nr_ports = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		poll.timeout = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		set_xen_guest_handle(poll.ports, &evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) EXPORT_SYMBOL(xen_poll_irq_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)  * Poll waiting for an irq to become pending.  In the usual case, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)  * irq will be disabled so it won't deliver an interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) void xen_poll_irq(int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	xen_poll_irq_timeout(irq, 0 /* no timeout */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
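/*
 * Usage sketch (modelled on the Xen pv-spinlock wait loop; the condition
 * check is a hypothetical placeholder): clear the pending state first,
 * re-check the wakeup condition, then block in the hypervisor until the
 * event fires:
 *
 *	xen_clear_irq_pending(irq);
 *	if (!condition_became_true())
 *		xen_poll_irq(irq);
 */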
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /* Check whether the IRQ line is shared with other guests (PIRQs only). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) int xen_test_irq_shared(int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	struct irq_info *info = info_for_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	struct physdev_irq_status_query irq_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	if (WARN_ON(!info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	irq_status.irq = info->u.pirq.pirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	return !(irq_status.flags & XENIRQSTAT_shared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) EXPORT_SYMBOL_GPL(xen_test_irq_shared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) void xen_irq_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	struct irq_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	/* New event-channel space is not 'live' yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	xen_evtchn_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	/* No IRQ <-> event-channel mappings. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	list_for_each_entry(info, &xen_irq_list_head, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		info->evtchn = 0; /* zap event-channel binding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	clear_evtchn_to_irq_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		restore_cpu_virqs(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		restore_cpu_ipis(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	restore_pirqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
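/*
 * Context note (assumption about the caller, not stated in this file): this
 * runs from the Xen suspend path (xen_suspend() in drivers/xen/manage.c)
 * once the domain returns from HYPERVISOR_suspend, at which point the
 * hypervisor has discarded all previous event-channel bindings.
 */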
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) static struct irq_chip xen_dynamic_chip __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	.name			= "xen-dyn",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	.irq_disable		= disable_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	.irq_mask		= disable_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	.irq_unmask		= enable_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	.irq_ack		= ack_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	.irq_mask_ack		= mask_ack_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	.irq_set_affinity	= set_affinity_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	.irq_retrigger		= retrigger_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) static struct irq_chip xen_lateeoi_chip __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	.name			= "xen-dyn-lateeoi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	.irq_disable		= disable_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	.irq_mask		= disable_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	.irq_unmask		= enable_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	.irq_ack		= lateeoi_ack_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	.irq_mask_ack		= lateeoi_mask_ack_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	.irq_set_affinity	= set_affinity_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	.irq_retrigger		= retrigger_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) };
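/*
 * Usage sketch (the lateeoi API is real, the handler body is illustrative):
 * drivers bind to this chip via bind_evtchn_to_irqhandler_lateeoi() and must
 * report completion explicitly so event storms can be throttled:
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		bool spurious = !process_event(dev_id);	// hypothetical
 *
 *		xen_irq_lateeoi(irq, spurious ? XEN_EOI_FLAG_SPURIOUS : 0);
 *		return IRQ_HANDLED;
 *	}
 */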
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) static struct irq_chip xen_pirq_chip __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	.name			= "xen-pirq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	.irq_startup		= startup_pirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	.irq_shutdown		= shutdown_pirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	.irq_enable		= enable_pirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	.irq_disable		= disable_pirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	.irq_mask		= disable_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	.irq_unmask		= enable_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	.irq_ack		= eoi_pirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	.irq_eoi		= eoi_pirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	.irq_mask_ack		= mask_ack_pirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	.irq_set_affinity	= set_affinity_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	.irq_retrigger		= retrigger_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) static struct irq_chip xen_percpu_chip __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	.name			= "xen-percpu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	.irq_disable		= disable_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	.irq_mask		= disable_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	.irq_unmask		= enable_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	.irq_ack		= ack_dynirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) };
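/*
 * Usage sketch (mirrors arch/x86/xen/time.c; the flag set is abbreviated):
 * per-cpu VIRQs such as the timer end up on this chip, e.g.:
 *
 *	bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				IRQF_PERCPU | IRQF_TIMER | IRQF_NOBALANCING,
 *				"timer", NULL);
 */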
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) #ifdef CONFIG_XEN_PVHVM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)  * Vector callbacks are better than PCI interrupts to receive event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)  * channel notifications because we can receive vector callbacks on any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)  * vcpu and we don't need PCI support or APIC interactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) void xen_setup_callback_vector(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	uint64_t callback_via;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	if (xen_have_vector_callback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		if (xen_set_callback_via(callback_via)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 			pr_err("Request for Xen HVM callback vector failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 			xen_have_vector_callback = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) static __init void xen_alloc_callback_vector(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	if (!xen_have_vector_callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	pr_info("Xen HVM callback vector for event delivery is enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) void xen_setup_callback_vector(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) static inline void xen_alloc_callback_vector(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) #endif
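/*
 * Note (from the Xen HVM params ABI, not defined in this file):
 * HVM_CALLBACK_VECTOR(x) encodes the delivery method in the top byte of the
 * 64-bit "callback via" value, i.e.
 *
 *	callback_via = ((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR << 56) | x;
 *
 * which xen_set_callback_via() hands to the hypervisor.
 */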
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) bool xen_fifo_events = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) module_param_named(fifo_events, xen_fifo_events, bool, 0);
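/*
 * Note (documented in kernel-parameters.txt): since this code is built in,
 * the knob above is the boot parameter "xen.fifo_events"; booting with
 * xen.fifo_events=0 forces the 2-level event-channel ABI even when the
 * hypervisor offers FIFO events.
 */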
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) static int xen_evtchn_cpu_prepare(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	xen_cpu_init_eoi(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	if (evtchn_ops->percpu_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		ret = evtchn_ops->percpu_init(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) static int xen_evtchn_cpu_dead(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	if (evtchn_ops->percpu_deinit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		ret = evtchn_ops->percpu_deinit(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) void __init xen_init_IRQ(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	evtchn_port_t evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	if (xen_fifo_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		ret = xen_evtchn_fifo_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		xen_evtchn_2l_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		xen_fifo_events = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	xen_cpu_init_eoi(smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 				  "xen/evtchn:prepare",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 				sizeof(*evtchn_to_irq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	BUG_ON(!evtchn_to_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	/* No event channels are 'live' right now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		mask_evtchn(evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	pirq_needs_eoi = pirq_needs_eoi_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	if (xen_pv_domain() && xen_initial_domain())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		pci_xen_initial_domain();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	if (xen_feature(XENFEAT_hvm_callback_vector)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		xen_setup_callback_vector();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		xen_alloc_callback_vector();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	if (xen_hvm_domain()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		native_init_IRQ();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		 * pci_xen_hvm_init must be called after native_init_IRQ so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		 * that __acpi_register_gsi can point at the right function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		pci_xen_hvm_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		struct physdev_pirq_eoi_gmfn eoi_gmfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 			free_page((unsigned long)pirq_eoi_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 			pirq_eoi_map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 			pirq_needs_eoi = pirq_check_eoi_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) }