// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu_entry_area.h>
#include <asm/irq_stack.h>
#include <asm/io_apic.h>
#include <asm/apic.h>

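/*
 * Per-CPU backing store for the hardirq stack. It is page aligned so
 * that map_irq_stack() can, with CONFIG_VMAP_STACK, remap it into the
 * vmalloc area one page at a time.
 */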
DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
DECLARE_INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_VMAP_STACK
/*
 * VMAP the backing store with guard pages
 */
static int map_irq_stack(unsigned int cpu)
{
	char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);
	struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];
	void *va;
	int i;

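	/*
	 * Collect the struct page for each page of the backing store:
	 * translate each per-CPU virtual address to its physical address
	 * and from there to the page frame.
	 */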
	for (i = 0; i < IRQ_STACK_SIZE / PAGE_SIZE; i++) {
		phys_addr_t pa = per_cpu_ptr_to_phys(stack + (i << PAGE_SHIFT));

		pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
	}

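	/*
	 * Remap the pages contiguously into the vmalloc area. Mappings
	 * there are separated by guard pages, so an overrun off the
	 * stack hits an unmapped page and faults instead of silently
	 * corrupting neighbouring data.
	 */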
	va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
	if (!va)
		return -ENOMEM;

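	/* The stack grows down, so hand out a pointer to its top. */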
	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
	return 0;
}
#else
/*
 * If VMAP stacks are disabled due to KASAN, just use the per-CPU
 * backing store directly, without guard pages.
 */
static int map_irq_stack(unsigned int cpu)
{
	void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);

	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
	return 0;
}
#endif

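/*
 * Set up the hardirq stack for @cpu during bringup. Safe to call more
 * than once for the same CPU, e.g. when a CPU is offlined and brought
 * back: if the stack pointer is already set up, the existing mapping
 * is kept.
 */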
int irq_init_percpu_irqstack(unsigned int cpu)
{
	if (per_cpu(hardirq_stack_ptr, cpu))
		return 0;
	return map_irq_stack(cpu);
}

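/*
 * Run pending softirqs on the per-CPU hardirq stack. The _cond variant
 * switches to the irq stack only when the CPU is not already running
 * on it.
 */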
void do_softirq_own_stack(void)
{
	run_on_irqstack_cond(__do_softirq, NULL);
}