Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

kernel/irq_work.c, as of commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300):

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}
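
/*
 * Flag sketch (values as defined in <linux/smp_types.h> for 5.10):
 * IRQ_WORK_PENDING is 0x01, IRQ_WORK_BUSY is 0x02, and IRQ_WORK_CLAIMED
 * is both together, so the single atomic_fetch_or() above claims a free
 * entry (oflags == 0) and detects an already-queued one (PENDING set) in
 * one step. An entry that is only BUSY (callback currently running,
 * PENDING already cleared) can be claimed and queued again.
 */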

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
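
/*
 * Architectures that can send a self-IPI override this __weak stub;
 * arm64, for instance, raises IPI_IRQ_WORK from its arch_irq_work_raise()
 * so queued work runs almost immediately rather than at the next tick.
 */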

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from next tick if any */
	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
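
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * the callback later runs in hardirq context on the CPU that queued it.
 *
 *	static void my_irq_work_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback in hardirq context\n");
 *	}
 *
 *	static DEFINE_IRQ_WORK(my_work, my_irq_work_func);
 *
 *	// NMI-safe; returns false if my_work was already pending.
 *	bool queued = irq_work_queue(&my_work);
 */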

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backend aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		__smp_call_single_queue(cpu, &work->llnode);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);
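
/*
 * Remote-queue sketch, reusing the hypothetical my_work above: run the
 * callback on CPU 2 instead of the local CPU. Unlike irq_work_queue(),
 * this must not be used from NMI context for a remote CPU (see the
 * WARN_ON_ONCE(in_nmi()) above).
 *
 *	if (!irq_work_queue_on(&my_work, 2))
 *		pr_debug("my_work already pending somewhere\n");
 */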

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}
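
/*
 * irq_work_needs_cpu() is consulted by the NOHZ code (see
 * tick_nohz_next_event() in kernel/time/tick-sched.c) so that a CPU
 * does not stop its tick while raised or lazy work still needs the
 * tick in order to run.
 */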

void irq_work_single(void *arg)
{
	struct irq_work *work = arg;
	int flags;

	/*
	 * Clear the PENDING bit, after this point the @work
	 * can be re-used.
	 * Make it immediately visible so that other CPUs trying
	 * to claim that work don't rely on us to handle their data
	 * while we are in the middle of the func.
	 */
	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);

	lockdep_irq_work_enter(work);
	work->func(work);
	lockdep_irq_work_exit(work);
	/*
	 * Clear the BUSY bit and return to the free state if
	 * no-one else claimed it meanwhile.
	 */
	flags &= ~IRQ_WORK_PENDING;
	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
}
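
/*
 * Lifecycle sketch for the flags above: a queued entry is
 * PENDING | BUSY. The atomic_fetch_andnot() drops PENDING before the
 * callback runs, so the entry may be claimed and re-queued even from
 * within its own callback. The final atomic_cmpxchg() clears BUSY only
 * if nobody re-claimed the entry meanwhile; if someone did, the flags
 * are left to the new owner.
 */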

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode)
		irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
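
/*
 * irq_work_tick() is driven from the periodic tick (see
 * update_process_times() in kernel/time/timer.c): lazy work, and raised
 * work on architectures without a dedicated irq_work interrupt, runs
 * from here rather than from an IPI.
 */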

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
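
/*
 * Teardown sketch (hypothetical driver code): irq_work_sync() busy-waits
 * with IRQs enabled until the callback is no longer running, making it
 * safe to free the memory holding the irq_work afterwards.
 *
 *	static void my_teardown(struct my_dev *dev)
 *	{
 *		irq_work_sync(&dev->work);	// spins while IRQ_WORK_BUSY
 *		kfree(dev);
 *	}
 */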