^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/arch/xtensa/kernel/irq.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Xtensa built-in interrupt controller and some generic functions copied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * from i386.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright (C) 2002 - 2013 Tensilica, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Chris Zankel <chris@zankel.net>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Kevin Chea
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/kernel_stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/irqchip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/irqchip/xtensa-mx.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/irqchip/xtensa-pic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/mxregs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <asm/platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) DECLARE_PER_CPU(unsigned long, nmi_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) int irq = irq_find_mapping(NULL, hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #ifdef CONFIG_DEBUG_STACKOVERFLOW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) /* Debugging check for stack overflow: is there less than 1KB free? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) unsigned long sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) sp &= THREAD_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) if (unlikely(sp < (sizeof(thread_info) + 1024)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) printk("Stack overflow in do_IRQ: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) sp - sizeof(struct thread_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) generic_handle_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
/*
 * Hook for /proc/interrupts: append xtensa-specific counters after the
 * generic per-IRQ lines -- IPI counts on SMP, and the per-CPU fake-NMI
 * count (nmi_count, declared per-CPU above) when XTENSA_FAKE_NMI is set.
 *
 * @p:    seq_file receiving the /proc/interrupts output
 * @prec: field width used to align the interrupt-name column
 *
 * Always returns 0.
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	/* only referenced under XTENSA_FAKE_NMI, hence __maybe_unused */
	unsigned cpu __maybe_unused;
#ifdef CONFIG_SMP
	show_ipi_list(p, prec);
#endif
#if XTENSA_FAKE_NMI
	seq_printf(p, "%*s:", prec, "NMI");
	for_each_online_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
	seq_puts(p, " Non-maskable interrupts\n");
#endif
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) unsigned long int_irq, unsigned long ext_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) unsigned long *out_hwirq, unsigned int *out_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) if (WARN_ON(intsize < 1 || intsize > 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) if (intsize == 2 && intspec[1] == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) int_irq = xtensa_map_ext_irq(ext_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) if (int_irq < XCHAL_NUM_INTERRUPTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) *out_hwirq = int_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) *out_hwirq = int_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) *out_type = IRQ_TYPE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
/*
 * irq_domain .map callback: install the chip and pick the flow handler
 * and level/edge status for a hardware interrupt line, based on the
 * line's build-time type in the XCHAL_INTTYPE_MASK_* configuration.
 *
 * @d:   irq domain; d->host_data carries the struct irq_chip to install
 * @irq: Linux irq number being mapped
 * @hw:  hardware irq number (its bit position indexes the INTTYPE masks)
 *
 * Always returns 0.
 */
int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
		irq_hw_number_t hw)
{
	struct irq_chip *irq_chip = d->host_data;
	u32 mask = 1 << hw;

	if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
		/* software interrupts need no ack/eoi: simple flow */
		irq_set_chip_and_handler_name(irq, irq_chip,
				handle_simple_irq, "level");
		irq_set_status_flags(irq, IRQ_LEVEL);
	} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
		irq_set_chip_and_handler_name(irq, irq_chip,
				handle_edge_irq, "edge");
		irq_clear_status_flags(irq, IRQ_LEVEL);
	} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
		irq_set_chip_and_handler_name(irq, irq_chip,
				handle_level_irq, "level");
		irq_set_status_flags(irq, IRQ_LEVEL);
	} else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
		/* timer interrupts are per-CPU (CCOMPARE is per-core) */
		irq_set_chip_and_handler_name(irq, irq_chip,
				handle_percpu_irq, "timer");
		irq_clear_status_flags(irq, IRQ_LEVEL);
#ifdef XCHAL_INTTYPE_MASK_PROFILING
	/* only defined on cores with a profiling interrupt */
	} else if (mask & XCHAL_INTTYPE_MASK_PROFILING) {
		irq_set_chip_and_handler_name(irq, irq_chip,
				handle_percpu_irq, "profiling");
		irq_set_status_flags(irq, IRQ_LEVEL);
#endif
	} else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
		/* XCHAL_INTTYPE_MASK_NMI */
		irq_set_chip_and_handler_name(irq, irq_chip,
				handle_level_irq, "level");
		irq_set_status_flags(irq, IRQ_LEVEL);
	}
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) unsigned xtensa_map_ext_irq(unsigned ext_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) for (i = 0; mask; ++i, mask >>= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) if ((mask & 1) && ext_irq-- == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) return XCHAL_NUM_INTERRUPTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) unsigned xtensa_get_ext_irq_no(unsigned irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) XCHAL_INTTYPE_MASK_EXTERN_LEVEL) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) ((1u << irq) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) return hweight32(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
/*
 * Architecture IRQ initialization, called once at boot.  Probes the
 * interrupt controller from the device tree when CONFIG_USE_OF is set;
 * otherwise falls back to legacy setup -- the MX controller on
 * SMP-capable cores, the plain xtensa PIC elsewhere.
 */
void __init init_IRQ(void)
{
#ifdef CONFIG_USE_OF
	irqchip_init();
#else
#ifdef CONFIG_HAVE_SMP
	xtensa_mx_init_legacy(NULL);
#else
	xtensa_pic_init_legacy(NULL);
#endif
#endif

#ifdef CONFIG_SMP
	/* set up inter-processor interrupts once the irqchip is ready */
	ipi_init();
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * The CPU has been marked offline. Migrate IRQs off this CPU. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * the affinity settings do not allow other CPUs, force them onto any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * available CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) void migrate_irqs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) unsigned int i, cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) for_each_active_irq(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) struct irq_data *data = irq_get_irq_data(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) struct cpumask *mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) unsigned int newcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) if (irqd_is_per_cpu(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) mask = irq_data_get_affinity_mask(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (!cpumask_test_cpu(cpu, mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) newcpu = cpumask_any_and(mask, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) if (newcpu >= nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) i, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) cpumask_setall(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) irq_set_affinity(i, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) #endif /* CONFIG_HOTPLUG_CPU */