Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

ip30-irq.c (git blame attributes every line to commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300):

// SPDX-License-Identifier: GPL-2.0
/*
 * ip30-irq.c: High-level interrupt handling for the IP30 architecture.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/types.h>

#include <asm/irq_cpu.h>
#include <asm/sgi/heart.h>

#include "ip30-common.h"

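/*
 * Per-interrupt chip data: @cpu records which CPU the interrupt is
 * currently routed to (updated by ip30_set_heart_irq_affinity()).
 */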
struct heart_irq_data {
	u64	*irq_mask;
	int	cpu;
};

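/* Bitmap of HEART hardware interrupt numbers already allocated or reserved. */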
static DECLARE_BITMAP(heart_irq_map, HEART_NUM_IRQS);

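/* Per-CPU software copy of the bits enabled in that CPU's HEART interrupt mask register. */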
static DEFINE_PER_CPU(unsigned long, irq_enable_mask);

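/*
 * Find and claim a free bit in heart_irq_map. The test_and_set_bit()
 * retry guards against a concurrent caller claiming the same bit
 * between the search and the claim.
 */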
static inline int heart_alloc_int(void)
{
	int bit;

again:
	bit = find_first_zero_bit(heart_irq_map, HEART_NUM_IRQS);
	if (bit >= HEART_NUM_IRQS)
		return -ENOSPC;

	if (test_and_set_bit(bit, heart_irq_map))
		goto again;

	return bit;
}

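/*
 * Chained handler for the HEART level-4 (error) interrupts: mask and ack
 * the pending error sources, dump diagnostic state if the cause register
 * is set, and panic on a fatal error.
 */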
static void ip30_error_irq(struct irq_desc *desc)
{
	u64 pending, mask, cause, error_irqs, err_reg;
	int cpu = smp_processor_id();
	int i;

	pending = heart_read(&heart_regs->isr);
	mask = heart_read(&heart_regs->imr[cpu]);
	cause = heart_read(&heart_regs->cause);
	error_irqs = (pending & HEART_L4_INT_MASK & mask);

	/* Bail if there's nothing to process (how did we get here, then?) */
	if (unlikely(!error_irqs))
		return;

	/* Prevent any of the error IRQs from firing again. */
	heart_write(mask & ~(pending), &heart_regs->imr[cpu]);

	/* Ack all error IRQs. */
	heart_write(HEART_L4_INT_MASK, &heart_regs->clear_isr);

	/*
	 * If we also have a cause value, then something happened, so loop
	 * through the error IRQs and report a "heart attack" for each one
	 * and print the value of the HEART cause register.  This is really
	 * primitive right now, but it should hopefully work until a more
	 * robust error handling routine can be put together.
	 *
	 * Refer to heart.h for the HC_* macros to work out the cause
	 * that got us here.
	 */
	if (cause) {
		pr_alert("IP30: CPU%d: HEART ATTACK! ISR = 0x%.16llx, IMR = 0x%.16llx, CAUSE = 0x%.16llx\n",
			 cpu, pending, mask, cause);

		if (cause & HC_COR_MEM_ERR) {
			err_reg = heart_read(&heart_regs->mem_err_addr);
			pr_alert("  HEART_MEMERR_ADDR = 0x%.16llx\n", err_reg);
		}

		/* i = 63; i >= 51; i-- */
		for (i = HEART_ERR_MASK_END; i >= HEART_ERR_MASK_START; i--)
			if ((pending >> i) & 1)
				pr_alert("  HEART Error IRQ #%d\n", i);

		/* XXX: Seems possible to loop forever here, so panic(). */
		panic("IP30: Fatal Error !\n");
	}

	/* Unmask the error IRQs. */
	heart_write(mask, &heart_regs->imr[cpu]);
}

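/*
 * Chained handler for the HEART level-0/1/2 interrupts: IPIs (reschedule
 * and smp_call_function) are handled directly; everything else is mapped
 * through the HEART irq domain to its Linux virq.
 */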
static void ip30_normal_irq(struct irq_desc *desc)
{
	int cpu = smp_processor_id();
	struct irq_domain *domain;
	u64 pend, mask;
	int irq;

	pend = heart_read(&heart_regs->isr);
	mask = (heart_read(&heart_regs->imr[cpu]) &
		(HEART_L0_INT_MASK | HEART_L1_INT_MASK | HEART_L2_INT_MASK));

	pend &= mask;
	if (unlikely(!pend))
		return;

#ifdef CONFIG_SMP
	if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_0)) {
		heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0),
			    &heart_regs->clear_isr);
		scheduler_ipi();
	} else if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_1)) {
		heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_1),
			    &heart_regs->clear_isr);
		scheduler_ipi();
	} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_0)) {
		heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0),
			    &heart_regs->clear_isr);
		generic_smp_call_function_interrupt();
	} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_1)) {
		heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_1),
			    &heart_regs->clear_isr);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		domain = irq_desc_get_handler_data(desc);
		irq = irq_linear_revmap(domain, __ffs(pend));
		if (irq)
			generic_handle_irq(irq);
		else
			spurious_interrupt();
	}
}

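/*
 * irq_chip callbacks: acking clears the bit in the HEART interrupt status
 * register, while mask/unmask update the target CPU's enable mask and
 * write it back to that CPU's IMR.
 */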
static void ip30_ack_heart_irq(struct irq_data *d)
{
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}

static void ip30_mask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}

static void ip30_mask_and_ack_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}

static void ip30_unmask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}

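/*
 * Re-target an interrupt to the first online CPU in the requested mask,
 * temporarily masking it if it is already started.
 */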
static int ip30_set_heart_irq_affinity(struct irq_data *d,
				       const struct cpumask *mask, bool force)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	if (irqd_is_started(d))
		ip30_mask_and_ack_heart_irq(d);

	hd->cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irqd_is_started(d))
		ip30_unmask_heart_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}

static struct irq_chip heart_irq_chip = {
	.name			= "HEART",
	.irq_ack		= ip30_ack_heart_irq,
	.irq_mask		= ip30_mask_heart_irq,
	.irq_mask_ack		= ip30_mask_and_ack_heart_irq,
	.irq_unmask		= ip30_unmask_heart_irq,
	.irq_set_affinity	= ip30_set_heart_irq_affinity,
};

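/*
 * Allocate a single HEART hwirq, attach its chip data and handle it as a
 * level-triggered interrupt.
 */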
static int heart_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct heart_irq_data *hd;
	int hwirq;

	if (nr_irqs > 1 || !info)
		return -EINVAL;

	hd = kzalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return -ENOMEM;

	hwirq = heart_alloc_int();
	if (hwirq < 0) {
		kfree(hd);
		return -EAGAIN;
	}
	irq_domain_set_info(domain, virq, hwirq, &heart_irq_chip, hd,
			    handle_level_irq, NULL, NULL);

	return 0;
}

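/* Release a single HEART hwirq: return its bit to heart_irq_map and free the chip data. */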
static void heart_domain_free(struct irq_domain *domain,
			      unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irqd;

	if (nr_irqs > 1)
		return;

	irqd = irq_domain_get_irq_data(domain, virq);
	if (irqd) {
		clear_bit(irqd->hwirq, heart_irq_map);
		kfree(irqd->chip_data);
	}
}

static const struct irq_domain_ops heart_domain_ops = {
	.alloc = heart_domain_alloc,
	.free  = heart_domain_free,
};

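/*
 * Enable and ack the calling CPU's reschedule and call-function IPI bits
 * in its HEART interrupt mask.
 */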
void __init ip30_install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = &per_cpu(irq_enable_mask, cpu);

	set_bit(HEART_L2_INT_RESCHED_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0 + cpu),
		    &heart_regs->clear_isr);
	set_bit(HEART_L2_INT_CALL_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0 + cpu),
		    &heart_regs->clear_isr);

	heart_write(*mask, &heart_regs->imr[cpu]);
}

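/*
 * Platform interrupt setup: mask and ack everything in HEART, enable the
 * per-CPU error interrupts, reserve fixed-purpose bits in heart_irq_map,
 * create the HEART irq domain and chain the HEART outputs (L0-L2 and the
 * error level) to their handlers.
 */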
void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;
	unsigned long *mask;
	int i;

	mips_cpu_irq_init();

	/* Mask all IRQs. */
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[0]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[1]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[2]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[3]);

	/* Ack everything. */
	heart_write(HEART_ACK_ALL_MASK, &heart_regs->clear_isr);

	/* Enable specific HEART error IRQs for each CPU. */
	mask = &per_cpu(irq_enable_mask, 0);
	*mask |= HEART_CPU0_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[0]);
	mask = &per_cpu(irq_enable_mask, 1);
	*mask |= HEART_CPU1_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[1]);

	/*
	 * Some HEART bits are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be accidentally
	 * used later.
	 */
	set_bit(HEART_L0_INT_GENERIC, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_0, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_1, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_1, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_1, heart_irq_map);
	set_bit(HEART_L3_INT_TIMER, heart_irq_map);

	/* Reserve the error interrupts (#51 to #63). */
	for (i = HEART_L4_INT_XWID_ERR_9; i <= HEART_L4_INT_HEART_EXCP; i++)
		set_bit(i, heart_irq_map);

	fn = irq_domain_alloc_named_fwnode("HEART");
	WARN_ON(fn == NULL);
	if (!fn)
		return;
	domain = irq_domain_create_linear(fn, HEART_NUM_IRQS,
					  &heart_domain_ops, NULL);
	WARN_ON(domain == NULL);
	if (!domain)
		return;

	irq_set_default_host(domain);

	irq_set_percpu_devid(IP30_HEART_L0_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L0_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L1_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L1_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L2_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L2_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_ERR_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_ERR_IRQ, ip30_error_irq,
					 domain);
}