// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/irq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/irq.h>
#include <linux/memory.h>
#include <linux/smp.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/kprobes.h>
#include <linux/scs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <asm/daifflags.h>
#include <asm/vmap_stack.h>

/* Only access this in an NMI enter/exit */
DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);

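/*
 * Base of each CPU's IRQ stack; the exception entry code switches to this
 * stack when handling an interrupt taken from the task stack.
 */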
DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);

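/*
 * Declared unconditionally so that the reference in init_irq_scs() below
 * always compiles; the variable is only defined when
 * CONFIG_SHADOW_CALL_STACK=y, and the IS_ENABLED() check lets the compiler
 * discard the dead access otherwise.
 */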
DECLARE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
#endif

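/*
 * Allocate an IRQ shadow call stack for each possible CPU. When
 * CONFIG_SHADOW_CALL_STACK is disabled this is a no-op: the IS_ENABLED()
 * check returns early and the compiler drops the loop entirely.
 */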
static void init_irq_scs(void)
{
	int cpu;

	if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
		return;

	for_each_possible_cpu(cpu)
		per_cpu(irq_shadow_call_stack_ptr, cpu) =
			scs_alloc(cpu_to_node(cpu));
}

#ifdef CONFIG_VMAP_STACK
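/*
 * With CONFIG_VMAP_STACK, IRQ stacks live in vmalloc space, so a stack
 * overflow hits a guard page instead of silently corrupting adjacent data.
 */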
static void init_irq_stacks(void)
{
	int cpu;
	unsigned long *p;

	for_each_possible_cpu(cpu) {
		p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
		per_cpu(irq_stack_ptr, cpu) = p;
	}
}
#else
/* The IRQ stack only needs to be 16-byte aligned, not IRQ_STACK_SIZE aligned. */
DEFINE_PER_CPU_ALIGNED(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);

static void init_irq_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
}
#endif

void __init init_IRQ(void)
{
	init_irq_stacks();
	init_irq_scs();
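	/*
	 * Probe the interrupt controller(s) described by DT/ACPI; the root
	 * controller's driver installs its entry point via set_handle_irq(),
	 * which populates handle_arch_irq.
	 */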
	irqchip_init();
	if (!handle_arch_irq)
		panic("No interrupt controller found.");

	if (system_uses_irq_prio_masking()) {
		/*
		 * Now that we have a stack for our IRQ handler, set
		 * the PMR/PSR pair to a consistent state.
		 */
		WARN_ON(read_sysreg(daif) & PSR_A_BIT);
		local_daif_restore(DAIF_PROCCTX_NOIRQ);
	}
}