^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2010 John Crispin <john@phrozen.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/bootinfo.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/irq_cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <lantiq_soc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) /* register definitions - internal irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define LTQ_ICU_ISR 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define LTQ_ICU_IER 0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define LTQ_ICU_IOSR 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define LTQ_ICU_IRSR 0x0018
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define LTQ_ICU_IMR 0x0020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
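/* each interrupt module (IM) occupies a 0x28-byte block of registers */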
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define LTQ_ICU_IM_SIZE 0x28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /* register definitions - external irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define LTQ_EIU_EXIN_C 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define LTQ_EIU_EXIN_INIC 0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define LTQ_EIU_EXIN_INC 0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define LTQ_EIU_EXIN_INEN 0x000C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /* number of external interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define MAX_EIU 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) /* the performance counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define LTQ_PERF_IRQ (INT_NUM_IM4_IRL0 + 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * irqs generated by devices attached to the EBU need to be acked in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * a special manner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define LTQ_ICU_EBU_IRQ 22
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
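/*
 * ICU/EIU register accessors: vpe selects the per-VPE ICU instance,
 * m the interrupt module within it
 */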
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define ltq_icu_w32(vpe, m, x, y) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define ltq_icu_r32(vpe, m, x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) /* we have a cascade of 8 irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define MIPS_CPU_IRQ_CASCADE 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) static int exin_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) static u32 ltq_eiu_irq[MAX_EIU];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static void __iomem *ltq_icu_membase[NR_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) static void __iomem *ltq_eiu_membase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) static struct irq_domain *ltq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) static DEFINE_SPINLOCK(ltq_eiu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) static DEFINE_RAW_SPINLOCK(ltq_icu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) static int ltq_perfcount_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) int ltq_eiu_get_irq(int exin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) if (exin < exin_avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) return ltq_eiu_irq[exin];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
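/*
 * disabling must clear the IER bit on every present VPE, since the irq
 * may currently be enabled on any of them
 */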
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) void ltq_disable_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) unsigned long im = offset / INT_NUM_IM_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) int vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) offset %= INT_NUM_IM_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) raw_spin_lock_irqsave(<q_icu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) for_each_present_cpu(vpe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) ltq_icu_w32(vpe, im,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) LTQ_ICU_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) raw_spin_unlock_irqrestore(<q_icu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) void ltq_mask_and_ack_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) unsigned long im = offset / INT_NUM_IM_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) int vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) offset %= INT_NUM_IM_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) raw_spin_lock_irqsave(<q_icu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) for_each_present_cpu(vpe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) ltq_icu_w32(vpe, im,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) LTQ_ICU_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) raw_spin_unlock_irqrestore(<q_icu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) static void ltq_ack_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) unsigned long im = offset / INT_NUM_IM_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) int vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) offset %= INT_NUM_IM_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) raw_spin_lock_irqsave(<q_icu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) for_each_present_cpu(vpe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) raw_spin_unlock_irqrestore(<q_icu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
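/*
 * enabling, by contrast, only unmasks the irq on the single VPE named
 * by the effective affinity mask
 */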
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) void ltq_enable_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) unsigned long im = offset / INT_NUM_IM_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) int vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) offset %= INT_NUM_IM_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) /* This shouldn't be even possible, maybe during CPU hotplug spam */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) if (unlikely(vpe >= nr_cpu_ids))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) vpe = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) raw_spin_lock_irqsave(<q_icu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) LTQ_ICU_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) raw_spin_unlock_irqrestore(<q_icu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
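/*
 * each EXIN pin has a 4-bit trigger field in the EXIN_C register (only
 * the low 3 bits are written); the encodings are set in the switch below
 */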
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) for (i = 0; i < exin_avail; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) if (d->hwirq == ltq_eiu_irq[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) int val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) int edge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) case IRQF_TRIGGER_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) case IRQF_TRIGGER_RISING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) val = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) edge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) case IRQF_TRIGGER_FALLING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) val = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) edge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) val = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) edge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) case IRQF_TRIGGER_HIGH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) val = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) case IRQF_TRIGGER_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) val = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) pr_err("invalid type %d for irq %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) type, d->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) if (edge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) irq_set_handler(d->hwirq, handle_edge_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) spin_lock_irqsave(<q_eiu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) (~(7 << (i * 4)))) | (val << (i * 4)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) LTQ_EIU_EXIN_C);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) spin_unlock_irqrestore(<q_eiu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) ltq_enable_irq(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) for (i = 0; i < exin_avail; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) if (d->hwirq == ltq_eiu_irq[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) /* by default we are low level triggered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) /* clear all pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) LTQ_EIU_EXIN_INC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /* enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) LTQ_EIU_EXIN_INEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) static void ltq_shutdown_eiu_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) ltq_disable_irq(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) for (i = 0; i < exin_avail; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) if (d->hwirq == ltq_eiu_irq[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) LTQ_EIU_EXIN_INEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) #if defined(CONFIG_SMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) static int ltq_icu_irq_set_affinity(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) const struct cpumask *cpumask, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) struct cpumask tmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) irq_data_update_effective_affinity(d, &tmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) return IRQ_SET_MASK_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) static struct irq_chip ltq_irq_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) .name = "icu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) .irq_enable = ltq_enable_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) .irq_disable = ltq_disable_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) .irq_unmask = ltq_enable_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) .irq_ack = ltq_ack_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) .irq_mask = ltq_disable_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) .irq_mask_ack = ltq_mask_and_ack_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) #if defined(CONFIG_SMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) .irq_set_affinity = ltq_icu_irq_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) static struct irq_chip ltq_eiu_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) .name = "eiu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) .irq_startup = ltq_startup_eiu_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) .irq_shutdown = ltq_shutdown_eiu_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) .irq_enable = ltq_enable_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) .irq_disable = ltq_disable_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) .irq_unmask = ltq_enable_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) .irq_ack = ltq_ack_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) .irq_mask = ltq_disable_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) .irq_mask_ack = ltq_mask_and_ack_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) .irq_set_type = ltq_eiu_settype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) #if defined(CONFIG_SMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) .irq_set_affinity = ltq_icu_irq_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
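/*
 * chained handler for the cascaded ICU modules; MIPS CPU irqs 0 and 1
 * are software interrupts, so the hardware cascade lines start at 2
 */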
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) static void ltq_hw_irq_handler(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) unsigned int module = irq_desc_get_irq(desc) - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) u32 irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) irq_hw_number_t hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) int vpe = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) if (irq == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * silicon bug causes only the msb set to 1 to be valid. all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * other bits might be bogus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) irq = __fls(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) /* if this is a EBU irq, we need to ack it or get a deadlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) LTQ_EBU_PCC_ISTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) struct irq_chip *chip = <q_irq_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) struct irq_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) if (hw < MIPS_CPU_IRQ_CASCADE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) for (i = 0; i < exin_avail; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (hw == ltq_eiu_irq[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) chip = <q_eiu_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) data = irq_get_irq_data(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) irq_data_update_effective_affinity(data, cpumask_of(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) irq_set_chip_and_handler(irq, chip, handle_level_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) static const struct irq_domain_ops irq_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) .xlate = irq_domain_xlate_onetwocell,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) .map = icu_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
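/*
 * set up the ICU from the devicetree: one register range per VPE, plus
 * the optional EIU block for the external irqs
 */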
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) int __init icu_of_init(struct device_node *node, struct device_node *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) struct device_node *eiu_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) int i, ret, vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) /* load register regions of available ICUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) for_each_possible_cpu(vpe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) if (of_address_to_resource(node, vpe, &res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) panic("Failed to get icu%i memory range", vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) if (!request_mem_region(res.start, resource_size(&res),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) res.name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) pr_err("Failed to request icu%i memory\n", vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) ltq_icu_membase[vpe] = ioremap(res.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) resource_size(&res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) if (!ltq_icu_membase[vpe])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) panic("Failed to remap icu%i memory", vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) /* turn off all irqs by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) for_each_possible_cpu(vpe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) for (i = 0; i < MAX_IM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) /* make sure all irqs are turned off by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) /* clear all possibly pending interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) /* clear resend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) mips_cpu_irq_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) for (i = 0; i < MAX_IM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) ltq_domain = irq_domain_add_linear(node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) &irq_domain_ops, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) /* tell oprofile which irq to use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) /* the external interrupts are optional and xway only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) /* find out how many external irq sources we have */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) exin_avail = of_property_count_u32_elems(eiu_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) "lantiq,eiu-irqs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) if (exin_avail > MAX_EIU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) exin_avail = MAX_EIU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) ltq_eiu_irq, exin_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) panic("failed to load external irq resources");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if (!request_mem_region(res.start, resource_size(&res),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) res.name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) pr_err("Failed to request eiu memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) ltq_eiu_membase = ioremap(res.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) resource_size(&res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) if (!ltq_eiu_membase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) panic("Failed to remap eiu memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) int get_c0_perfcount_int(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) return ltq_perfcount_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) unsigned int get_c0_compare_int(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) return CP0_LEGACY_COMPARE_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) static const struct of_device_id of_irq_ids[] __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) { .compatible = "lantiq,icu", .data = icu_of_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) void __init arch_init_irq(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) of_irq_init(of_irq_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) }