// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_exit);

/*
   - No shared variables; all data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     with its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution, so we get a sort of weak CPU binding. It is still not
     clear whether this results in better locality.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);

/*
 * active_softirqs -- per-CPU mask of the softirqs currently being handled,
 * maintained without synchronization on the expectation that approximate
 * answers are acceptable.
 */
DEFINE_PER_CPU(__u32, active_softirqs);

const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
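
/*
 * Illustration (a sketch assuming the common SOFTIRQ_SHIFT = 8 layout
 * from <linux/preempt.h>; the exact constants are configuration
 * dependent):
 *
 *	local_bh_disable():	preempt_count += SOFTIRQ_DISABLE_OFFSET (0x200)
 *	__do_softirq() entry:	preempt_count += SOFTIRQ_OFFSET         (0x100)
 *
 *	softirq_count() == 0x100 -> serving a softirq
 *	softirq_count() == 0x200 -> bh disabled, not serving
 *	softirq_count() == 0x300 -> serving a softirq with bh also disabled
 *
 * in_serving_softirq() therefore only needs to test the SOFTIRQ_OFFSET bit.
 */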

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS

DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                lockdep_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
                current->preempt_disable_ip = get_lock_parent_ip();
#endif
                trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
        }
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
        lockdep_assert_irqs_disabled();

        if (preempt_count() == cnt)
                trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                lockdep_softirqs_on(_RET_IP_);

        __preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                lockdep_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        preempt_count_sub(cnt - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run softirq if any pending. And do it in its own stack
                 * as we may be calling this deep in a task call stack already.
                 */
                do_softirq();
        }

        preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
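
/*
 * Note (illustrative arithmetic, not from the original source): the 2 ms
 * budget is rounded up to whole jiffies, so its effective resolution
 * depends on CONFIG_HZ. For example, msecs_to_jiffies(2) is 2 jiffies at
 * HZ=1000 but only 1 jiffy (i.e. up to 10 ms of granularity) at HZ=100,
 * which is one reason the MAX_SOFTIRQ_RESTART count matters as a second,
 * time-independent bound.
 */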

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack, we
 * need to keep the lockdep irq context tracking as tight as possible in
 * order not to mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
        bool in_hardirq = false;

        if (lockdep_hardirq_context()) {
                in_hardirq = true;
                lockdep_hardirq_exit();
        }

        lockdep_softirq_enter();

        return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
        lockdep_softirq_exit();

        if (in_hardirq)
                lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

#define softirq_deferred_for_rt(pending)                \
({                                                      \
        __u32 deferred = 0;                             \
        if (cpupri_check_rt()) {                        \
                deferred = pending & LONG_SOFTIRQ_MASK; \
                pending &= ~LONG_SOFTIRQ_MASK;          \
        }                                               \
        deferred;                                       \
})
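
/*
 * Illustration (hypothetical values; the actual LONG_SOFTIRQ_MASK contents
 * are defined elsewhere): with an RT task runnable on this CPU and, say,
 * LONG_SOFTIRQ_MASK covering NET_RX, a pending mask of (NET_RX | TIMER)
 * splits into deferred = NET_RX (left for ksoftirqd) while pending = TIMER
 * is still handled inline, keeping long-running vectors out of the RT
 * task's way.
 */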

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
        bool in_hardirq;
        __u32 deferred;
        __u32 pending;
        int softirq_bit;

        /*
         * Mask out PF_MEMALLOC as the current task context is borrowed for the
         * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
         * again if the socket is related to swapping.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();
        deferred = softirq_deferred_for_rt(pending);
        account_irq_enter_time(current);
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
        in_hardirq = lockdep_softirq_start();

restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(deferred);
        __this_cpu_write(active_softirqs, pending);

        local_irq_enable();

        h = softirq_vec;

        while ((softirq_bit = ffs(pending))) {
                unsigned int vec_nr;
                int prev_count;

                h += softirq_bit - 1;

                vec_nr = h - softirq_vec;
                prev_count = preempt_count();

                kstat_incr_softirqs_this_cpu(vec_nr);

                trace_softirq_entry(vec_nr);
                h->action(h);
                trace_softirq_exit(vec_nr);
                if (unlikely(prev_count != preempt_count())) {
                        pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
                               vec_nr, softirq_to_name[vec_nr], h->action,
                               prev_count, preempt_count());
                        preempt_count_set(prev_count);
                }
                h++;
                pending >>= softirq_bit;
        }

        __this_cpu_write(active_softirqs, 0);
        if (__this_cpu_read(ksoftirqd) == current)
                rcu_softirq_qs();
        local_irq_disable();

        pending = local_softirq_pending();
        deferred = softirq_deferred_for_rt(pending);

        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

#ifndef CONFIG_RT_SOFTINT_OPTIMIZATION
                wakeup_softirqd();
#endif
        }

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
        if (pending | deferred)
                wakeup_softirqd();
#endif
        lockdep_softirq_end(in_hardirq);
        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
        current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                do_softirq_own_stack();

        local_irq_restore(flags);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_irq_enter();
                _local_bh_enable();
        }
        __irq_enter();
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
        rcu_irq_enter();
        irq_enter_rcu();
}

static inline void invoke_softirq(void)
{
        if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack, which
                 * can already be deep. Run the softirq on its own stack to
                 * prevent any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_irq())
                        tick_nohz_irq_exit();
        }
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        lockdep_assert_irqs_disabled();
#endif
        account_irq_exit_time(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
        __irq_exit_rcu();
        /* must be last! */
        lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
        __irq_exit_rcu();
        rcu_irq_exit();
        /* must be last! */
        lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
        lockdep_assert_irqs_disabled();
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
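
/*
 * Example usage (a sketch, not part of this file; the handler name is
 * hypothetical): a subsystem registers its handler once at boot and then
 * raises the softirq later, typically from hardirq context:
 *
 *	static void my_net_tx_action(struct softirq_action *h)
 *	{
 *		// drain this CPU's transmit queue ...
 *	}
 *
 *	open_softirq(NET_TX_SOFTIRQ, my_net_tx_action);	// init, once
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);			// mark pending here
 */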

/*
 * Tasklets
 */
struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
                                      struct tasklet_head __percpu *headp,
                                      unsigned int softirq_nr)
{
        struct tasklet_head *head;
        unsigned long flags;

        local_irq_save(flags);
        head = this_cpu_ptr(headp);
        t->next = NULL;
        *head->tail = t;
        head->tail = &(t->next);
        raise_softirq_irqoff(softirq_nr);
        local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
        __tasklet_schedule_common(t, &tasklet_vec,
                                  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        __tasklet_schedule_common(t, &tasklet_hi_vec,
                                  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
                                  struct tasklet_head *tl_head,
                                  unsigned int softirq_nr)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = tl_head->head;
        tl_head->head = NULL;
        tl_head->tail = &tl_head->head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                if (t->use_callback) {
                                        trace_tasklet_entry(t->callback);
                                        t->callback(t);
                                        trace_tasklet_exit(t->callback);
                                } else {
                                        trace_tasklet_entry(t->func);
                                        t->func(t->data);
                                        trace_tasklet_exit(t->func);
                                }
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *tl_head->tail = t;
                tl_head->tail = &t->next;
                __raise_softirq_irqoff(softirq_nr);
                local_irq_enable();
        }
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
        tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
        tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
                   void (*callback)(struct tasklet_struct *))
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->callback = callback;
        t->use_callback = true;
        t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->use_callback = false;
        t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
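
/*
 * Example usage (a sketch; my_dev and my_cb are hypothetical): a driver
 * sets up a tasklet once, schedules it from its interrupt handler, and
 * kills it on teardown:
 *
 *	static void my_cb(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, tasklet);
 *		// bottom-half work, runs in softirq context
 *	}
 *
 *	tasklet_setup(&dev->tasklet, my_cb);	// probe
 *	tasklet_schedule(&dev->tasklet);	// interrupt handler
 *	tasklet_kill(&dev->tasklet);		// remove
 */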

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                pr_notice("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirqs on the current stack, as we
                 * are not deep in the task stack here.
                 */
                __do_softirq();
                local_irq_enable();
                cond_resched();
                return;
        }
        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which may already
 * be scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
        return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
        .store			= &ksoftirqd,
        .thread_should_run	= ksoftirqd_should_run,
        .thread_fn		= run_ksoftirqd,
        .thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
        cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
                                  takeover_tasklets);
        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
        return from;
}