Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

kernel/irq/spurious.c (git blame: every line from commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300):

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;
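/*
 * Reviewer note: irqfixup stays 0 unless set from the kernel command
 * line by the "irqfixup" (1) or "irqpoll" (2) options handled at the
 * bottom of this file; level 2 extends misrouted-IRQ polling to
 * handled interrupts marked IRQF_IRQPOLL (and to IRQ 0).
 */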

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(struct timer_list *unused);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
static int irq_poll_cpu;
static atomic_t irq_poll_active;

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck the disabled
 * state and the action list (which is about to be torn down). Only
 * if the interrupt is still active do we return true and let the
 * handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
	__must_hold(&desc->lock)
{
	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
		      "irq poll in progress on cpu %d for irq %d\n",
		      smp_processor_id(), desc->irq_data.irq))
		return false;

#ifdef CONFIG_SMP
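	/*
	 * Reviewer note: desc->lock is dropped while busy-waiting below
	 * because the polling CPU must take desc->lock to finish and
	 * clear the in-progress state; spinning with the lock held
	 * would deadlock against the poller.
	 */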
	do {
		raw_spin_unlock(&desc->lock);
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();
		raw_spin_lock(&desc->lock);
	} while (irqd_irq_inprogress(&desc->irq_data));
	/* Might have been disabled in the meantime */
	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
	return false;
#endif
}


/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(struct irq_desc *desc, bool force)
{
	irqreturn_t ret = IRQ_NONE;
	struct irqaction *action;

	raw_spin_lock(&desc->lock);

	/*
	 * PER_CPU, nested thread interrupts and interrupts explicitly
	 * marked polled are excluded from polling.
	 */
	if (irq_settings_is_per_cpu(desc) ||
	    irq_settings_is_nested_thread(desc) ||
	    irq_settings_is_polled(desc))
		goto out;

	/*
	 * Do not poll disabled interrupts unless the spurious
	 * disabled poller asks explicitly.
	 */
	if (irqd_irq_disabled(&desc->irq_data) && !force)
		goto out;

	/*
	 * All handlers must agree on IRQF_SHARED, so we test just the
	 * first.
	 */
	action = desc->action;
	if (!action || !(action->flags & IRQF_SHARED) ||
	    (action->flags & __IRQF_TIMER))
		goto out;

	/* Already running on another processor */
	if (irqd_irq_inprogress(&desc->irq_data)) {
		/*
		 * Already running: if it is shared, get the other
		 * CPU to go looking for our mystery interrupt too.
		 */
		desc->istate |= IRQS_PENDING;
		goto out;
	}

	/* Mark it poll in progress */
	desc->istate |= IRQS_POLL_INPROGRESS;
	do {
		if (handle_irq_event(desc) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
		/* Make sure that there is still a valid action */
		action = desc->action;
	} while ((desc->istate & IRQS_PENDING) && action);
	desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
	raw_spin_unlock(&desc->lock);
	return ret == IRQ_HANDLED;
}

static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;

	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(desc, false))
			ok = 1;
	}
out:
	atomic_dec(&irq_poll_active);
	/* So the caller can adjust the irq error counts */
	return ok;
}
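/*
 * Reviewer note: the atomic_inc_return() == 1 test above admits at
 * most one poller at a time; concurrent callers see a value > 1 and
 * back off. The same pattern guards poll_spurious_irqs() below.
 */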

static void poll_spurious_irqs(struct timer_list *unused)
{
	struct irq_desc *desc;
	int i;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;
	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		unsigned int state;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		state = desc->istate;
		barrier();
		if (!(state & IRQS_SPURIOUS_DISABLED))
			continue;

		local_irq_disable();
		try_one_irq(desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
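/*
 * Reviewer note: once a line has been disabled as spurious, this
 * self-rearming timer force-polls it every POLL_SPURIOUS_IRQ_INTERVAL
 * (HZ/10 jiffies, roughly every 100ms), so devices sharing the line
 * keep making progress, just slowly.
 */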

static inline int bad_action_ret(irqreturn_t action_ret)
{
	unsigned int r = action_ret;

	if (likely(r <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
		return 0;
	return 1;
}
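/*
 * Worked example (reviewer note): IRQ_NONE == 0, IRQ_HANDLED == 1 and
 * IRQ_WAKE_THREAD == 2, so every legitimate return value or OR-ed
 * combination is <= 3; anything larger (or a negative error code,
 * which wraps to a huge unsigned value) is reported as bogus.
 */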

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one.)
 */
static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct irqaction *action;
	unsigned long flags;

	if (bad_action_ret(action_ret)) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	/*
	 * We need to take desc->lock here. note_interrupt() is called
	 * without desc->lock held, but with the IRQ marked in progress.
	 * We might race with something else removing an action. It's
	 * ok to take desc->lock here. See synchronize_irq().
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		printk(KERN_ERR "[<%p>] %ps", action->handler, action->handler);
		if (action->thread_fn)
			printk(KERN_CONT " threaded [<%p>] %ps",
					action->thread_fn, action->thread_fn);
		printk(KERN_CONT "\n");
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	/* Report at most 100 bad IRQ events over the system lifetime */
	if (count > 0) {
		count--;
		__report_bad_irq(desc, action_ret);
	}
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted. */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional, legacy PC timer interrupt).
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't take the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}

#define SPURIOUS_DEFERRED	0x80000000
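/*
 * Reviewer note: SPURIOUS_DEFERRED is bit 31 of
 * desc->threads_handled_last. When set, it records that a hard
 * interrupt only woke a thread and the spurious check was deferred;
 * the low bits cache the threads_handled count seen at that point.
 */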

void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq;

	if (desc->istate & IRQS_POLL_INPROGRESS ||
	    irq_settings_is_polled(desc))
		return;

	if (bad_action_ret(action_ret)) {
		report_bad_irq(desc, action_ret);
		return;
	}

	/*
	 * We cannot call note_interrupt from the threaded handler
	 * because we need to look at the compound result of all
	 * handlers (primary and threaded). Aside from that, in the
	 * threaded shared case we have no serialization against an
	 * incoming hardware interrupt while we are dealing with a
	 * threaded result.
	 *
	 * So in case a thread is woken, we just note the fact and
	 * defer the analysis to the next hardware interrupt.
	 *
	 * The threaded handlers store whether they successfully
	 * handled an interrupt and we check whether that number
	 * changed versus the last invocation.
	 *
	 * We could handle all interrupts with the delayed-by-one
	 * mechanism, but for the non-forced-threaded case we'd just
	 * add pointless overhead to the straight hardirq interrupts
	 * for the sake of a few lines less code.
	 */
	if (action_ret & IRQ_WAKE_THREAD) {
		/*
		 * A thread was woken. Check whether one of the
		 * shared primary handlers returned IRQ_HANDLED. If
		 * not, we defer the spurious detection to the next
		 * interrupt.
		 */
		if (action_ret == IRQ_WAKE_THREAD) {
			int handled;
			/*
			 * We use bit 31 of thread_handled_last to
			 * denote that deferred spurious detection is
			 * active. No locking necessary as
			 * thread_handled_last is only accessed here
			 * and we have the guarantee that hard
			 * interrupts are not reentrant.
			 */
			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
				desc->threads_handled_last |= SPURIOUS_DEFERRED;
				return;
			}
			/*
			 * Check whether one of the threaded handlers
			 * returned IRQ_HANDLED since the last
			 * interrupt happened.
			 *
			 * For simplicity we just set bit 31, as it is
			 * set in threads_handled_last as well. So we
			 * avoid extra masking. And we really do not
			 * care about the high bits of the handled
			 * count. We just care about the count being
			 * different than the one we saw before.
			 */
			handled = atomic_read(&desc->threads_handled);
			handled |= SPURIOUS_DEFERRED;
			if (handled != desc->threads_handled_last) {
				action_ret = IRQ_HANDLED;
				/*
				 * Note: We keep the SPURIOUS_DEFERRED
				 * bit set. We are handling the
				 * previous invocation right now.
				 * Keep it for the current one, so the
				 * next hardware interrupt will
				 * account for it.
				 */
				desc->threads_handled_last = handled;
			} else {
				/*
				 * None of the threaded handlers felt
				 * responsible for the last interrupt.
				 *
				 * We keep the SPURIOUS_DEFERRED bit
				 * set in threads_handled_last as we
				 * need to account for the current
				 * interrupt as well.
				 */
				action_ret = IRQ_NONE;
			}
		} else {
			/*
			 * One of the primary handlers returned
			 * IRQ_HANDLED. So we don't care about the
			 * threaded handlers on the same line. Clear
			 * the deferred detection bit.
			 *
			 * In theory we could/should check whether the
			 * deferred bit is set and take the result of
			 * the previous run into account here as
			 * well. But it's really not worth the
			 * trouble. If every other interrupt is
			 * handled, we never trigger the spurious
			 * detector. And if this is just the one out
			 * of 100k unhandled ones which is handled,
			 * then we merely delay the spurious detection
			 * by one hard interrupt. Not a real problem.
			 */
			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
		}
	}

	if (unlikely(action_ret == IRQ_NONE)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity, then don't eventually trigger an
		 * error; otherwise the counter becomes a doomsday timer
		 * for otherwise-working systems.
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
	}

	irq = irq_desc_get_irq(desc);
	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->istate |= IRQS_SPURIOUS_DISABLED;
		desc->depth++;
		irq_disable(desc);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}
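/*
 * Worked example (reviewer note): desc->irq_count is checked every
 * 100,000 interrupts; the line is killed only if more than 99,900 of
 * them (i.e. over 99.9%) went unhandled. Because irqs_unhandled is
 * reset to 1 whenever two unhandled events arrive more than HZ/10
 * apart, the occasional stray interrupt on an otherwise healthy line
 * never accumulates to the threshold.
 */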

bool noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
				"enabled\n");
	printk(KERN_WARNING "This may significantly impact system "
				"performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);
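/*
 * Usage note (reviewer addition): all three knobs above are kernel
 * command-line options, e.g. appended to the bootargs:
 *
 *   noirqdebug  - turn off spurious-IRQ/lockup detection entirely
 *   irqfixup    - when an interrupt goes unhandled, poll the handlers
 *                 of all other shared, non-timer IRQs (irqfixup == 1)
 *   irqpoll     - as irqfixup, but also poll on handled interrupts
 *                 marked IRQF_IRQPOLL and on IRQ 0 (irqfixup == 2)
 *
 * noirqdebug and irqfixup are additionally exposed as writable (0644)
 * module parameters.
 */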