Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/ia64/kernel/irq_ia64.c:

// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *                      PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *						Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>
#include <linux/pgtable.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/tlbflush.h>

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
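/*
 * Indexed by ISA IRQ number: entry n is the IA-64 vector delivered for
 * legacy IRQ n.  All sixteen entries fall in the 0x20-0x2f range; note
 * that IRQ 0 (the PIT timer) gets 0x2f, the top of that range, and the
 * remaining IRQs descend from 0x2e, presumably because numerically
 * higher vectors are served at higher priority on IA-64.
 */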
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};

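/*
 * Dynamically allocated IRQ numbers share the vector number space, so
 * the search for a free IRQ starts at IA64_FIRST_DEVICE_VECTOR rather
 * than at 0; everything below that is left to the fixed legacy and
 * per-CPU bindings.
 */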
static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpumask_and(&mask, &domain, &vector_table[vector]);
		if (!cpumask_empty(&mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}

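/*
 * Bind @irq to @vector on every online CPU in @domain.  Three data
 * structures are kept in sync: irq_cfg[] records the irq -> (vector,
 * domain) mapping, the per-cpu vector_irq[] array records the reverse
 * vector -> irq mapping used at interrupt time, and vector_table[]
 * remembers which CPUs already use the vector so that
 * find_unassigned_vector() will not hand it out again for an
 * overlapping domain.
 */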
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu(cpu, &mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

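/*
 * Note the double negation below: unlike most functions in this file,
 * reserve_irq_vector() returns 0 on success and 1 (not a -errno value)
 * on failure.
 */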
int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

/*
 * Initialize vector_irq on a new CPU. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the in-use vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#ifdef CONFIG_SMP

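/*
 * Selected with the "vector=" boot parameter (see parse_vector_domain()
 * below): "vector=percpu" gives every CPU its own vector allocation
 * domain, and also disables interrupt routing, instead of the default
 * single domain spanning all CPUs.
 */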
static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return *cpumask_of(cpu);
	return CPU_MASK_ALL;
}

static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpumask_test_cpu(cpu, &cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

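/*
 * IRQ migration is a two-step protocol: irq_prepare_move() allocates a
 * vector in the target CPU's domain while remembering the old domain,
 * and irq_complete_move(), once the interrupt shows up outside the old
 * domain, sends IA64_IRQ_MOVE_VECTOR IPIs so that each CPU of the old
 * domain tears down its stale vector_irq[] entry in
 * smp_irq_move_cleanup_interrupt().
 */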
int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
	for_each_cpu(i, &cleanup_mask)
		ia64_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}

static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpumask_test_cpu(me, &cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__this_cpu_write(vector_irq[vector], -1);
		cpumask_clear_cpu(me, &vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif


void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	irq_init_desc(irq);
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic IRQ allocation and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		irq_init_desc(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	irq_init_desc(irq);
	clear_irq_vector(irq);
}
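
/*
 * Illustrative use of the pair above (my_msi_handler and dev stand in
 * for a real driver's handler and cookie):
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	if (request_irq(irq, my_msi_handler, 0, "my-msi", dev))
 *		destroy_irq(irq);
 *
 * A real MSI user additionally programs the device's MSI message from
 * the vector bound to the IRQ here.
 */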

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * This is where the IVT branches when we get an external interrupt.
 * It dispatches to the correct hardware IRQ handler via a function
 * pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
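	/*
	 * (The figure of 16 comes from TPR masking whole priority
	 * classes: the 256 vectors fall into 16 classes of 16, and
	 * writing the in-service vector to TPR masks everything at the
	 * same or lower priority, so at most one interrupt can nest
	 * per class.)
	 */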
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irq_this_cpu(irq);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to
 * be brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt-style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irq_this_cpu(irq);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call the normal IRQ handling path, just as it
			 * would have been invoked from a real interrupt
			 * handler, passing NULL for pt_regs since there
			 * is no interrupted context here.  This could
			 * probably share code with ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif


#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
	return IRQ_NONE;
}

/*
 * KVM uses this interrupt to force a CPU out of guest mode
 */

#endif

void
register_percpu_irq(ia64_vector vec, irq_handler_t handler, unsigned long flags,
		    const char *name)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (handler)
		if (request_irq(irq, handler, flags, name, NULL))
			pr_err("Failed to request irq %u (%s)\n", irq, name);
	irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, handle_IPI, 0, "IPI");
	register_percpu_irq(IA64_IPI_RESCHEDULE, dummy_handler, 0, "resched");
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, dummy_handler, 0,
			    "tlb_flush");
#endif
}

void __init
init_IRQ (void)
{
	acpi_boot_init();
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL, 0, NULL);
#ifdef CONFIG_SMP
	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
				    smp_irq_move_cleanup_interrupt, 0,
				    "irq_move");
	}
#endif
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * The CPU number is encoded as an 8-bit ID and an 8-bit EID.
	 */

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}
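
/*
 * Worked example of the computation above, purely illustrative: for a
 * physical CPU id of 0x0102 (ID = 0x01, EID = 0x02) and redirect = 0,
 * the target's IPI register sits at ipi_base_addr + (0x0102 << 4) =
 * ipi_base_addr + 0x1020, and for delivery_mode == 0 the data word is
 * just the vector, e.g. ipi_data = 0x30 to deliver vector 0x30.
 */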