/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>

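/*
 * ipic_lock serializes access to the internal PIC status/mask
 * registers, epic_lock to the external IRQ configuration registers.
 */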
static DEFINE_SPINLOCK(ipic_lock);
static DEFINE_SPINLOCK(epic_lock);

static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
static void (*dispatch_internal)(int cpu);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);

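/*
 * External IRQs 0-3 are configured through the first config register;
 * IRQs 4 and up live in the second one (only set up on SoCs with more
 * than four external IRQs).
 */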
static inline u32 get_ext_irq_perf_reg(int irq)
{
	if (irq < 4)
		return ext_irq_cfg_reg1;
	return ext_irq_cfg_reg2;
}

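/*
 * When external IRQs are cascaded through the internal PIC, remap a
 * pending bit in the external range back to its external IRQ number
 * before dispatching it.
 */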
static inline void handle_internal(int intbit)
{
	if (is_ext_irq_cascaded &&
	    intbit >= ext_irq_start && intbit <= ext_irq_end)
		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
	else
		do_IRQ(intbit + IRQ_INTERNAL_BASE);
}

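/*
 * An IRQ may only be enabled on a CPU that is online and, under SMP,
 * allowed by the requested (or previously recorded) affinity mask.
 */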
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
				     const struct cpumask *m)
{
	bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
	if (m)
		enable &= cpumask_test_cpu(cpu, m);
	else if (irqd_affinity_was_set(d))
		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
	return enable;
}

/*
 * Dispatch internal device IRQs (UART, Ethernet, watchdog, ...). No
 * interrupt is prioritized over another: the static counter resumes
 * the scan where it left off the last time we left this function.
 */

#define BUILD_IPIC_INTERNAL(width)					\
void __dispatch_internal_##width(int cpu)				\
{									\
	u32 pending[width / 32];					\
	unsigned int src, tgt;						\
	bool irqs_pending = false;					\
	static unsigned int i[2];					\
	unsigned int *next = &i[cpu];					\
	unsigned long flags;						\
									\
	/* read registers in reverse order */				\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
		u32 val;						\
									\
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val;					\
									\
		if (val)						\
			irqs_pending = true;				\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
									\
	if (!irqs_pending)						\
		return;							\
									\
	while (1) {							\
		unsigned int to_call = *next;				\
									\
		*next = (*next + 1) & (width - 1);			\
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
			handle_internal(to_call);			\
			break;						\
		}							\
	}								\
}									\
									\
static void __internal_irq_mask_##width(struct irq_data *d)		\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
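	/* word 0 holds the upper IRQs, so flip the index on width 64 */ \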
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		val &= ~(1 << bit);					\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}									\
									\
static void __internal_irq_unmask_##width(struct irq_data *d,		\
					  const struct cpumask *m)	\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		if (enable_irq_for_cpu(cpu, d, m))			\
			val |= (1 << bit);				\
		else							\
			val &= ~(1 << bit);				\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);

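/*
 * Top-level dispatch: IP7 is the CPU timer and IP0/IP1 the software
 * interrupts, IP2 cascades the internal PIC of CPU 0. IP3 either
 * cascades the second CPU's internal PIC (when external IRQs are
 * routed through the internal PIC) or carries external IRQ 0
 * directly, with IP4-IP6 carrying the remaining external IRQs.
 */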
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		if (is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				dispatch_internal(1);
		} else {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}

/*
 * Internal IRQ operations: mask/unmask only touch the PERF IRQ mask
 * register.
 */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}

static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d, NULL);
}

/*
 * External IRQ operations: mask/unmask and clear via the PERF
 * external IRQ control register.
 */
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg &= ~EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
}

static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
				    NULL);
}

static void bcm63xx_external_irq_clear(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_CLEAR(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);
}

static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		pr_err("bogus flow type combination given!\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}

#ifdef CONFIG_SMP
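/*
 * Affinity takes effect by rewriting the per-CPU mask registers; if
 * the IRQ is currently masked, the new mask is applied on the next
 * unmask.
 */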
static int bcm63xx_internal_set_affinity(struct irq_data *data,
					 const struct cpumask *dest,
					 bool force)
{
	if (!irqd_irq_disabled(data))
		internal_irq_unmask(data, dest);

	return 0;
}
#endif

static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};

static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};

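/*
 * Resolve the per-SoC PERF register layout and external IRQ wiring,
 * then pick the 32- or 64-bit dispatch/mask/unmask variants.
 */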
static void bcm63xx_init_irq(void)
{
	int irq_bits;

	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
		irq_bits = 64;
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

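	/* chips with 64 internal IRQs spread them over two 32-bit words */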
	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}

void __init arch_init_irq(void)
{
	int i, irq;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

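	/*
	 * When not cascaded, external IRQs arrive directly on CPU lines
	 * IP3-IP6; claim those lines so plat_irq_dispatch can route them.
	 */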
	if (!is_ext_irq_cascaded) {
		for (i = 3; i < 3 + ext_irq_count; ++i) {
			irq = MIPS_CPU_IRQ_BASE + i;
			if (request_irq(irq, no_action, IRQF_NO_THREAD,
					"cascade_extirq", NULL)) {
				pr_err("Failed to request irq %d (cascade_extirq)\n",
				       irq);
			}
		}
	}

	irq = MIPS_CPU_IRQ_BASE + 2;
	if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip2", NULL))
		pr_err("Failed to request irq %d (cascade_ip2)\n", irq);
#ifdef CONFIG_SMP
	if (is_ext_irq_cascaded) {
		irq = MIPS_CPU_IRQ_BASE + 3;
		if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip3",
				NULL))
			pr_err("Failed to request irq %d (cascade_ip3)\n", irq);
		bcm63xx_internal_irq_chip.irq_set_affinity =
			bcm63xx_internal_set_affinity;

		cpumask_clear(irq_default_affinity);
		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	}
#endif
}