// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
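
/*
 * Usage note: booting with "threadirqs" on the kernel command line sets
 * force_irqthreads above, which forces threading of interrupt handlers
 * except those explicitly marked IRQF_NO_THREAD, e.g. (illustrative
 * command line only):
 *
 *	... root=/dev/sda1 threadirqs ...
 */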
#endif

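/*
 * Spin until the INPROGRESS flag of @desc is no longer set, then re-check it
 * under desc->lock. When @sync_chip is true, additionally query the irq chip
 * (IRQCHIP_STATE_ACTIVE) so that an interrupt which is still in flight at
 * the hardware level is waited for as well; the chip query is best effort
 * and only updates the result when the chip supports it.
 */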
static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 *
 * It does not check whether there is an interrupt in flight at the
 * hardware level, but not serviced yet, as this might deadlock when
 * called with interrupts disabled and the target CPU of the interrupt
 * is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);
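
/*
 * Minimal usage sketch ("priv" and its irq field are hypothetical): from a
 * context which cannot sleep, after masking the source at the device level,
 * a driver could do
 *
 *	if (!synchronize_hardirq(priv->irq))
 *		pr_debug("threaded handler still running\n");
 *
 * Only the hard IRQ handler is guaranteed to have completed here; the
 * return value tells whether a threaded handler is still active.
 */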

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 *
 * It optionally makes sure (when the irq chip supports that method)
 * that the interrupt is not pending in any CPU and waiting for
 * service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, true);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
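
/*
 * Typical teardown sketch (the device and its fields are hypothetical):
 * stop the device from raising further interrupts, then wait for both the
 * hard and the threaded handler before releasing shared resources:
 *
 *	writel(0, priv->regs + IRQ_ENABLE);	// IRQ_ENABLE is hypothetical
 *	synchronize_irq(priv->irq);
 *	kfree(priv->rx_buf);
 *
 * This must not be called while holding a lock that the handler itself
 * takes, as the kernel-doc above points out.
 */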

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We cannot call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
}

static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask)
{
	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it, check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, then remove the isolated CPUs from
	 * the mask and just keep the housekeeping CPU(s). This prevents
	 * the affinity setter from routing the interrupt to an isolated
	 * CPU, so that I/O submitted from a housekeeping CPU does not
	 * cause interrupts on an isolated one.
	 *
	 * If the masks do not intersect or include online CPU(s) then
	 * keep the requested mask. The isolated target CPUs are only
	 * receiving interrupts when the I/O operation was submitted
	 * directly from them.
	 *
	 * If all housekeeping CPUs in the affinity mask are offline, the
	 * interrupt will be migrated by the CPU hotplug code once a
	 * housekeeping CPU which belongs to the affinity mask comes
	 * online.
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
		const struct cpumask *hk_mask, *prog_mask;

		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
		static struct cpumask tmp_mask;

		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);

		raw_spin_lock(&tmp_mask_lock);
		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
		ret = chip->irq_set_affinity(data, prog_mask, force);
		raw_spin_unlock(&tmp_mask_lock);
	} else {
		ret = chip->irq_set_affinity(data, mask, force);
	}
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(irq_do_set_affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * If the underlying vector management is busy and the architecture
	 * supports the generic pending mechanism, utilize it to avoid
	 * returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

static bool irq_set_affinity_deactivated(struct irq_data *data,
					 const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	/*
	 * Handle irq chips which can handle affinity only in activated
	 * state correctly
	 *
	 * If the interrupt is not yet activated, just store the affinity
	 * mask and do not call the chip driver at all. On activation the
	 * driver has to make sure anyway that the interrupt is in a
	 * usable state so startup works.
	 */
	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
		return false;

	cpumask_copy(desc->irq_common_data.affinity, mask);
	irq_init_effective_affinity(data, mask);
	irqd_set(data, IRQD_AFFINITY_SET);
	return true;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_set_affinity_deactivated(data, mask, force))
		return 0;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		if (!schedule_work(&desc->affinity_notify->work)) {
			/* Work was already scheduled, drop our extra ref */
			kref_put(&desc->affinity_notify->kref,
				 desc->affinity_notify->release);
		}
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
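
/*
 * Note: drivers normally reach this through irq_set_affinity() or
 * irq_force_affinity() (declared in <linux/interrupt.h>), which pass
 * force=false and force=true respectively, for example
 *
 *	irq_set_affinity(priv->irq, cpumask_of(0));	// priv is hypothetical
 */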

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
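
/*
 * Usage sketch (queue_irq and cpu are hypothetical driver variables): a
 * multi-queue driver typically hints one CPU per queue when setting up its
 * vectors and clears the hint again before free_irq():
 *
 *	irq_set_affinity_hint(queue_irq, cpumask_of(cpu));
 *	...
 *	irq_set_affinity_hint(queue_irq, NULL);
 *
 * The hint is exported via /proc/irq/<n>/affinity_hint for tools such as
 * irqbalance; the mask passed in must stay valid until it is cleared.
 */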

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification. Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		if (cancel_work_sync(&old_notify->work)) {
			/* Pending work had a ref, put that one too */
			kref_put(&old_notify->kref, old_notify->release);
		}
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
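
/*
 * Registration sketch (priv, my_affinity_changed and my_release are
 * hypothetical): a driver embeds a struct irq_affinity_notify, fills in the
 * two callbacks and registers it; passing NULL later disables notification:
 *
 *	priv->notify.notify = my_affinity_changed;
 *	priv->notify.release = my_release;
 *	irq_set_affinity_notifier(priv->irq, &priv->notify);
 *	...
 *	irq_set_affinity_notifier(priv->irq, NULL);
 *
 * my_release() runs when the last reference is dropped and must settle the
 * structure; as noted above, it is promised process context.
 */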

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif /* CONFIG_AUTO_IRQ_AFFINITY */
#endif /* CONFIG_SMP */


/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq:	interrupt number to set affinity
 * @vcpu_info:	vCPU specific data or pointer to a percpu array of vCPU
 *		specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
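
/*
 * Pairing sketch (priv and its fields are hypothetical): temporarily fence
 * the handler off while reconfiguring state it shares with process context:
 *
 *	disable_irq(priv->irq);
 *	reprogram_rings(priv);		// hypothetical helper
 *	enable_irq(priv->irq);
 *
 * disable_irq() waits for running handlers, so it must not be called while
 * holding a lock or resource the handler itself needs.
 */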

/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
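
/*
 * Optimistic-disable sketch (ndev and its poll routine are hypothetical):
 * from atomic context, only proceed when no handler is left running:
 *
 *	if (disable_hardirq(ndev->irq))
 *		poll_rx_ring(ndev);	// safe: hard handler has completed
 *	enable_irq(ndev->irq);
 *
 * A false return means a threaded handler is still active, so the caller
 * must not assume exclusive access to the hardware.
 */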

/**
 * disable_nmi_nosync - disable an nmi without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * The interrupt to disable must have been requested through request_nmi.
 * Unlike disable_nmi(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 * enable_nmi - enable handling of an nmi
 * @irq: Interrupt to enable
 *
 * The interrupt to enable must have been requested through request_nmi.
 * Undoes the effect of one call to disable_nmi(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * irq_set_irq_wake - control irq power management wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * @irq: interrupt to control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * @on: enable/disable power management wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * Enable/disable power management wakeup mode, which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * disabled by default. Enables and disables must match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * just as they match for non-wakeup mode support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * Wakeup mode lets this IRQ wake the system from sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * states like "suspend to RAM".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * Note: irq enable/disable state is completely orthogonal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * to the enable/disable state of irq wake. An irq can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * disabled with disable_irq() and still wake the system as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * long as the irq has wake enabled. If this does not hold,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * then the underlying irq chip and the related driver need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * to be investigated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) int irq_set_irq_wake(unsigned int irq, unsigned int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* Don't use NMIs as wake up interrupts please */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (desc->istate & IRQS_NMI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) /* wakeup-capable irqs can be shared between drivers that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * don't need to have the same sleep mode behaviors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (desc->wake_depth++ == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) ret = set_irq_wake_real(irq, on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) desc->wake_depth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (desc->wake_depth == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) } else if (--desc->wake_depth == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) ret = set_irq_wake_real(irq, on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) desc->wake_depth = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) irq_put_desc_busunlock(desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) EXPORT_SYMBOL(irq_set_irq_wake);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * Internal function that tells the architecture code whether a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * particular irq has been exclusively allocated or is available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * for driver use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) int can_request_irq(unsigned int irq, unsigned long irqflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) int canrequest = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (irq_settings_can_request(desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (!desc->action ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) irqflags & desc->action->flags & IRQF_SHARED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) canrequest = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) irq_put_desc_unlock(desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return canrequest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct irq_chip *chip = desc->irq_data.chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) int ret, unmask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (!chip || !chip->irq_set_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * IRQF_TRIGGER_* but the PIC does not support multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * flow-types?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) pr_debug("No set_type function for IRQ %d (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) irq_desc_get_irq(desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) chip ? (chip->name ? : "unknown") : "unknown");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (!irqd_irq_masked(&desc->irq_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) mask_irq(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (!irqd_irq_disabled(&desc->irq_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) unmask = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /* Mask all flags except trigger mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) flags &= IRQ_TYPE_SENSE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) ret = chip->irq_set_type(&desc->irq_data, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) case IRQ_SET_MASK_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) case IRQ_SET_MASK_OK_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) irqd_set(&desc->irq_data, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) case IRQ_SET_MASK_OK_NOCOPY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) flags = irqd_get_trigger_type(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) irq_settings_set_trigger_mask(desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) irqd_clear(&desc->irq_data, IRQD_LEVEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) irq_settings_clr_level(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (flags & IRQ_TYPE_LEVEL_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) irq_settings_set_level(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) irqd_set(&desc->irq_data, IRQD_LEVEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) flags, irq_desc_get_irq(desc), chip->irq_set_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (unmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) unmask_irq(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) #ifdef CONFIG_HARDIRQS_SW_RESEND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) int irq_set_parent(int irq, int parent_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) desc->parent_irq = parent_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) irq_put_desc_unlock(desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) EXPORT_SYMBOL_GPL(irq_set_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * Default primary interrupt handler for threaded interrupts. Is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * assigned as primary handler when request_threaded_irq is called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * with handler == NULL. Useful for oneshot interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * Primary handler for nested threaded interrupts. Should never be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) WARN(1, "Primary handler called for nested irq %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) WARN(1, "Secondary action handler called for irq %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) static int irq_wait_for_interrupt(struct irqaction *action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* may need to run one last time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (test_and_clear_bit(IRQTF_RUNTHREAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) &action->thread_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (test_and_clear_bit(IRQTF_RUNTHREAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) &action->thread_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * Oneshot interrupts keep the irq line masked until the threaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * handler finished. unmask if the interrupt has not been disabled and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * is marked MASKED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) static void irq_finalize_oneshot(struct irq_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct irqaction *action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (!(desc->istate & IRQS_ONESHOT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) action->handler == irq_forced_secondary_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) chip_bus_lock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) raw_spin_lock_irq(&desc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * Implausible though it may be we need to protect us against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * the following scenario:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * The thread is faster done than the hard interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * on the other CPU. If we unmask the irq line then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * interrupt can come in again and masks the line, leaves due
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * to IRQS_INPROGRESS and the irq line is masked forever.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * This also serializes the state of shared oneshot handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * versus "desc->threads_onehsot |= action->thread_mask;" in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * irq_wake_thread(). See the comment there which explains the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * serialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) raw_spin_unlock_irq(&desc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) chip_bus_sync_unlock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * Now check again, whether the thread should run. Otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * we would clear the threads_oneshot bit of this thread which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * was just set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) desc->threads_oneshot &= ~action->thread_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) irqd_irq_masked(&desc->irq_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) unmask_threaded_irq(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) raw_spin_unlock_irq(&desc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) chip_bus_sync_unlock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * Check whether we need to change the affinity of the interrupt thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) cpumask_var_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) bool valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * In case we are out of memory we set IRQTF_AFFINITY again and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * try again next time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) set_bit(IRQTF_AFFINITY, &action->thread_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) raw_spin_lock_irq(&desc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * This code is triggered unconditionally. Check the affinity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (cpumask_available(desc->irq_common_data.affinity)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) const struct cpumask *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) m = irq_data_get_effective_affinity_mask(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) cpumask_copy(mask, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) raw_spin_unlock_irq(&desc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) set_cpus_allowed_ptr(current, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) free_cpumask_var(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * Interrupts which are not explicitly requested as threaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * interrupts rely on the implicit bh/preempt disable of the hard irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * context. So we need to disable bh here to avoid deadlocks and other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * side effects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) irqreturn_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (!IS_ENABLED(CONFIG_PREEMPT_RT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) ret = action->thread_fn(action->irq, action->dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (ret == IRQ_HANDLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) atomic_inc(&desc->threads_handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) irq_finalize_oneshot(desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (!IS_ENABLED(CONFIG_PREEMPT_RT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * Interrupts explicitly requested as threaded interrupts want to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * preemtible - many of them need to sleep and wait for slow busses to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static irqreturn_t irq_thread_fn(struct irq_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct irqaction *action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) irqreturn_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) ret = action->thread_fn(action->irq, action->dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (ret == IRQ_HANDLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) atomic_inc(&desc->threads_handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) irq_finalize_oneshot(desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static void wake_threads_waitq(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (atomic_dec_and_test(&desc->threads_active))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) wake_up(&desc->wait_for_threads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static void irq_thread_dtor(struct callback_head *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct task_struct *tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct irqaction *action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) action = kthread_data(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) tsk->comm, tsk->pid, action->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) desc = irq_to_desc(action->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * If IRQTF_RUNTHREAD is set, we need to decrement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * desc->threads_active and wake possible waiters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) wake_threads_waitq(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /* Prevent a stale desc->threads_oneshot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) irq_finalize_oneshot(desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct irqaction *secondary = action->secondary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (WARN_ON_ONCE(!secondary))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) raw_spin_lock_irq(&desc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) __irq_wake_thread(desc, secondary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) raw_spin_unlock_irq(&desc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * Interrupt handler thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) static int irq_thread(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) struct callback_head on_exit_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct irqaction *action = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) struct irq_desc *desc = irq_to_desc(action->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) irqreturn_t (*handler_fn)(struct irq_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) struct irqaction *action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) &action->thread_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) handler_fn = irq_forced_thread_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) handler_fn = irq_thread_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) init_task_work(&on_exit_work, irq_thread_dtor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) task_work_add(current, &on_exit_work, TWA_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) irq_thread_check_affinity(desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) while (!irq_wait_for_interrupt(action)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) irqreturn_t action_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) irq_thread_check_affinity(desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) action_ret = handler_fn(desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (action_ret == IRQ_WAKE_THREAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) irq_wake_secondary(desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) wake_threads_waitq(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * This is the regular exit path. __free_irq() is stopping the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * thread via kthread_stop() after calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * oneshot mask bit can be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) task_work_cancel(current, irq_thread_dtor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * irq_wake_thread - wake the irq thread for the action identified by dev_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * @irq: Interrupt line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * @dev_id: Device identity for which the thread should be woken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) void irq_wake_thread(unsigned int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct irq_desc *desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct irqaction *action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) raw_spin_lock_irqsave(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) for_each_action_of_desc(desc, action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (action->dev_id == dev_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (action->thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) __irq_wake_thread(desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) EXPORT_SYMBOL_GPL(irq_wake_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) static int irq_setup_forced_threading(struct irqaction *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (!force_irqthreads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * No further action required for interrupts which are requested as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * threaded interrupts already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (new->handler == irq_default_primary_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) new->flags |= IRQF_ONESHOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * Handle the case where we have a real primary handler and a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * thread handler. We force thread them as well by creating a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * secondary action.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (new->handler && new->thread_fn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* Allocate the secondary action */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (!new->secondary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) new->secondary->handler = irq_forced_secondary_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) new->secondary->thread_fn = new->thread_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) new->secondary->dev_id = new->dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) new->secondary->irq = new->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) new->secondary->name = new->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /* Deal with the primary handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) new->thread_fn = new->handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) new->handler = irq_default_primary_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static int irq_request_resources(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct irq_data *d = &desc->irq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct irq_chip *c = d->chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) return c->irq_request_resources ? c->irq_request_resources(d) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) static void irq_release_resources(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct irq_data *d = &desc->irq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct irq_chip *c = d->chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (c->irq_release_resources)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) c->irq_release_resources(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static bool irq_supports_nmi(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct irq_data *d = irq_desc_get_irq_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) /* Only IRQs directly managed by the root irqchip can be set as NMI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (d->parent_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /* Don't support NMIs for chips behind a slow bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static int irq_nmi_setup(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct irq_data *d = irq_desc_get_irq_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) struct irq_chip *c = d->chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) static void irq_nmi_teardown(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) struct irq_data *d = irq_desc_get_irq_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct irq_chip *c = d->chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (c->irq_nmi_teardown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) c->irq_nmi_teardown(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) struct task_struct *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (!secondary) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) new->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) new->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (IS_ERR(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return PTR_ERR(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) sched_set_fifo(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * We keep the reference to the task struct even if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * the thread dies to avoid that the interrupt code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * references an already freed task_struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) new->thread = get_task_struct(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * Tell the thread to set its affinity. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * important for shared interrupt handlers as we do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * not invoke setup_affinity() for the secondary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * handlers as everything is already set up. Even for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * interrupts marked with IRQF_NO_BALANCE this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * correct as we want the thread to move to the cpu(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * on which the requesting code placed the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) set_bit(IRQTF_AFFINITY, &new->thread_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * Internal function to register an irqaction - typically used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * allocate special interrupts that are part of the architecture.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * Locking rules:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * desc->request_mutex Provides serialization against a concurrent free_irq()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * chip_bus_lock Provides serialization for slow bus operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * desc->lock Provides serialization against hard interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * chip_bus_lock and desc->lock are sufficient for all other management and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * interrupt related functions. desc->request_mutex solely serializes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * request/free_irq().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct irqaction *old, **old_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) unsigned long flags, thread_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) int ret, nested, shared = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (desc->irq_data.chip == &no_irq_chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (!try_module_get(desc->owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) new->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * If the trigger type is not specified by the caller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * then use the default for this interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (!(new->flags & IRQF_TRIGGER_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) new->flags |= irqd_get_trigger_type(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * Check whether the interrupt nests into another interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) nested = irq_settings_is_nested_thread(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (nested) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (!new->thread_fn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) goto out_mput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * Replace the primary handler which was provided from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) * the driver for non nested interrupt handling by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * dummy function which warns when called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) new->handler = irq_nested_primary_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (irq_settings_can_thread(desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) ret = irq_setup_forced_threading(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) goto out_mput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * Create a handler thread when a thread function is supplied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * and the interrupt does not nest into another interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (new->thread_fn && !nested) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) ret = setup_irq_thread(new, irq, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) goto out_mput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (new->secondary) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ret = setup_irq_thread(new->secondary, irq, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) goto out_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) * Drivers are often written to work w/o knowledge about the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) * underlying irq chip implementation, so a request for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * threaded irq without a primary hard irq context handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * requires the ONESHOT flag to be set. Some irq chips like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * MSI based interrupts are per se one shot safe. Check the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * chip flags, so we can avoid the unmask dance at the end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * the threaded handler for those.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) new->flags &= ~IRQF_ONESHOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * Protects against a concurrent __free_irq() call which might wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * for synchronize_hardirq() to complete without holding the optional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * chip bus lock and desc->lock. Also protects against handing out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * a recycled oneshot thread_mask bit while it's still in use by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * its previous owner.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) mutex_lock(&desc->request_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * Acquire bus lock as the irq_request_resources() callback below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * might rely on the serialization or the magic power management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * functions which are abusing the irq_bus_lock() callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) chip_bus_lock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) /* First installed action requests resources. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (!desc->action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) ret = irq_request_resources(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) new->name, irq, desc->irq_data.chip->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) goto out_bus_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * The following block of code has to be executed atomically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) * protected against a concurrent interrupt and any of the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) * management calls which are not serialized via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * desc->request_mutex or the optional bus lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) raw_spin_lock_irqsave(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) old_ptr = &desc->action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) old = *old_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * Can't share interrupts unless both agree to and are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * the same type (level, edge, polarity). So both flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) * fields must have IRQF_SHARED set and the bits which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) * set the trigger type must match. Also all must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) * agree on ONESHOT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * Interrupt lines used for NMIs cannot be shared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) unsigned int oldtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (desc->istate & IRQS_NMI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) new->name, irq, desc->irq_data.chip->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * If nobody did set the configuration before, inherit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * the one provided by the requester.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (irqd_trigger_type_was_set(&desc->irq_data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) oldtype = irqd_get_trigger_type(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) oldtype = new->flags & IRQF_TRIGGER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) irqd_set_trigger_type(&desc->irq_data, oldtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (!((old->flags & new->flags) & IRQF_SHARED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) ((old->flags ^ new->flags) & IRQF_ONESHOT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) goto mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /* All handlers must agree on per-cpuness */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if ((old->flags & IRQF_PERCPU) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) (new->flags & IRQF_PERCPU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) goto mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /* add new interrupt at end of irq queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * Or all existing action->thread_mask bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * so we can find the next zero bit for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) * new action.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) thread_mask |= old->thread_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) old_ptr = &old->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) old = *old_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) } while (old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) shared = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * Setup the thread mask for this irqaction for ONESHOT. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * !ONESHOT irqs the thread mask is 0 so we can avoid a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * conditional in irq_wake_thread().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (new->flags & IRQF_ONESHOT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * Unlikely to have 32 resp 64 irqs sharing one line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * but who knows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (thread_mask == ~0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * The thread_mask for the action is or'ed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * desc->thread_active to indicate that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * IRQF_ONESHOT thread handler has been woken, but not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * yet finished. The bit is cleared when a thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) * completes. When all threads of a shared interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * line have completed desc->threads_active becomes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * zero and the interrupt line is unmasked. See
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * handle.c:irq_wake_thread() for further information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * If no thread is woken by primary (hard irq context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) * interrupt handlers, then desc->threads_active is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * also checked for zero to unmask the irq line in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * affected hard irq flow handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * (handle_[fasteoi|level]_irq).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * The new action gets the first zero bit of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * thread_mask assigned. See the loop above which or's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * all existing action->thread_mask bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) new->thread_mask = 1UL << ffz(thread_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) } else if (new->handler == irq_default_primary_handler &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) * The interrupt was requested with handler = NULL, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * we use the default primary handler for it. But it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * does not have the oneshot flag set. In combination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * with level interrupts this is deadly, because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * default primary handler just wakes the thread, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * the irq lines is reenabled, but the device still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) * has the level irq asserted. Rinse and repeat....
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * While this works for edge type interrupts, we play
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * it safe and reject unconditionally because we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * say for sure which type this interrupt really
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) * has. The type flags are unreliable as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) * underlying chip implementation can override them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) new->name, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (!shared) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) init_waitqueue_head(&desc->wait_for_threads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) /* Setup the type (level, edge polarity) if configured: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (new->flags & IRQF_TRIGGER_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) ret = __irq_set_trigger(desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) new->flags & IRQF_TRIGGER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * Activate the interrupt. That activation must happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * independently of IRQ_NOAUTOEN. request_irq() can fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * and the callers are supposed to handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * that. enable_irq() of an interrupt requested with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * IRQ_NOAUTOEN is not supposed to fail. The activation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) * keeps it in shutdown mode, it merily associates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * resources if necessary and if that's not possible it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) * fails. Interrupts which are in managed shutdown mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) * will simply ignore that activation request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) ret = irq_activate(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) IRQS_ONESHOT | IRQS_WAITING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (new->flags & IRQF_PERCPU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) irqd_set(&desc->irq_data, IRQD_PER_CPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) irq_settings_set_per_cpu(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (new->flags & IRQF_ONESHOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) desc->istate |= IRQS_ONESHOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) /* Exclude IRQ from balancing if requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (new->flags & IRQF_NOBALANCING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) irq_settings_set_no_balancing(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (irq_settings_can_autoenable(desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * Shared interrupts do not go well with disabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) * auto enable. The sharing interrupt might request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * it while it's still disabled and then wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * interrupts forever.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) WARN_ON_ONCE(new->flags & IRQF_SHARED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) /* Undo nested disables: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) desc->depth = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) } else if (new->flags & IRQF_TRIGGER_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (nmsk != omsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) /* hope the handler works with current trigger mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) pr_warn("irq %d uses trigger mode %u; requested %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) irq, omsk, nmsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) *old_ptr = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) irq_pm_install_action(desc, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) /* Reset broken irq detection when installing new handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) desc->irq_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) desc->irqs_unhandled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) * Check whether we disabled the irq via the spurious handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * before. Reenable it and give it another chance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) desc->istate &= ~IRQS_SPURIOUS_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) __enable_irq(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) chip_bus_sync_unlock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) mutex_unlock(&desc->request_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) irq_setup_timings(desc, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) * Strictly no need to wake it up, but hung_task complains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) * when no hard interrupt wakes the thread up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (new->thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) wake_up_process(new->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (new->secondary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) wake_up_process(new->secondary->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) register_irq_proc(irq, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) new->dir = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) register_handler_proc(irq, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) mismatch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (!(new->flags & IRQF_PROBE_SHARED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) irq, new->flags, new->name, old->flags, old->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) #ifdef CONFIG_DEBUG_SHIRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (!desc->action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) irq_release_resources(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) out_bus_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) chip_bus_sync_unlock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) mutex_unlock(&desc->request_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) out_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (new->thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) struct task_struct *t = new->thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) new->thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) kthread_stop(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) put_task_struct(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (new->secondary && new->secondary->thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) struct task_struct *t = new->secondary->thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) new->secondary->thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) kthread_stop(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) put_task_struct(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) out_mput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) module_put(desc->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * Internal function to unregister an irqaction - used to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * regular and special interrupts that are part of the architecture.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) unsigned irq = desc->irq_data.irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct irqaction *action, **action_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) mutex_lock(&desc->request_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) chip_bus_lock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) raw_spin_lock_irqsave(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) * There can be multiple actions per IRQ descriptor, find the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) * one based on the dev_id:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) action_ptr = &desc->action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) action = *action_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (!action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) WARN(1, "Trying to free already-free IRQ %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) chip_bus_sync_unlock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) mutex_unlock(&desc->request_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) if (action->dev_id == dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) action_ptr = &action->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) /* Found it - now remove it from the list of entries: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) *action_ptr = action->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) irq_pm_remove_action(desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) /* If this was the last handler, shut down the IRQ line: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (!desc->action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) irq_settings_clr_disable_unlazy(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) /* Only shutdown. Deactivate after synchronize_hardirq() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) irq_shutdown(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) /* make sure affinity_hint is cleaned up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (WARN_ON_ONCE(desc->affinity_hint))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) desc->affinity_hint = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) * Drop bus_lock here so the changes which were done in the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) * callbacks above are synced out to the irq chips which hang
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * Aside of that the bus_lock can also be taken from the threaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) * handler in irq_finalize_oneshot() which results in a deadlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * because kthread_stop() would wait forever for the thread to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * complete, which is blocked on the bus lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * The still held desc->request_mutex() protects against a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * concurrent request_irq() of this irq so the release of resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * and timing data is properly serialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) chip_bus_sync_unlock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) unregister_handler_proc(irq, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) * Make sure it's not being used on another CPU and if the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * supports it also make sure that there is no (not yet serviced)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * interrupt in flight at the hardware level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) __synchronize_hardirq(desc, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) #ifdef CONFIG_DEBUG_SHIRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * It's a shared IRQ -- the driver ought to be prepared for an IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) * event to happen even now it's being freed, so let's make sure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * is so by doing an extra call to the handler ....
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * ( We do this after actually deregistering it, to make sure that a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * 'real' IRQ doesn't run in parallel with our fake. )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (action->flags & IRQF_SHARED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) action->handler(irq, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) * The action has already been removed above, but the thread writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * its oneshot mask bit when it completes. Though request_mutex is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) * held across this which prevents __setup_irq() from handing out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * the same bit to a newly requested action.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (action->thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) kthread_stop(action->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) put_task_struct(action->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) if (action->secondary && action->secondary->thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) kthread_stop(action->secondary->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) put_task_struct(action->secondary->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) /* Last action releases resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (!desc->action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * Reaquire bus lock as irq_release_resources() might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * require it to deallocate resources over the slow bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) chip_bus_lock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * There is no interrupt on the fly anymore. Deactivate it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) * completely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) raw_spin_lock_irqsave(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) irq_domain_deactivate_irq(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) irq_release_resources(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) chip_bus_sync_unlock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) irq_remove_timings(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) mutex_unlock(&desc->request_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) irq_chip_pm_put(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) module_put(desc->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) kfree(action->secondary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) return action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * free_irq - free an interrupt allocated with request_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * @irq: Interrupt line to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * @dev_id: Device identity to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * Remove an interrupt handler. The handler is removed and if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * interrupt line is no longer in use by any driver it is disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * On a shared IRQ the caller must ensure the interrupt is disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * on the card it drives before calling this function. The function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * does not return until any executing interrupts for this IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) * have completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * This function must not be called from interrupt context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * Returns the devname argument passed to request_irq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) const void *free_irq(unsigned int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) struct irq_desc *desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) struct irqaction *action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) const char *devname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (WARN_ON(desc->affinity_notify))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) desc->affinity_notify = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) action = __free_irq(desc, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) if (!action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) devname = action->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) kfree(action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) return devname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) EXPORT_SYMBOL(free_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) /* This function must be called with desc->lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) const char *devname = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) desc->istate &= ~IRQS_NMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (!WARN_ON(desc->action == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) irq_pm_remove_action(desc, desc->action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) devname = desc->action->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) unregister_handler_proc(irq, desc->action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) kfree(desc->action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) desc->action = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) irq_settings_clr_disable_unlazy(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) irq_shutdown_and_deactivate(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) irq_release_resources(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) irq_chip_pm_put(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) module_put(desc->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) return devname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) const void *free_nmi(unsigned int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) struct irq_desc *desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) const void *devname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) /* NMI still enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (WARN_ON(desc->depth == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) disable_nmi_nosync(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) raw_spin_lock_irqsave(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) irq_nmi_teardown(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) devname = __cleanup_nmi(irq, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) return devname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * request_threaded_irq - allocate an interrupt line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) * @irq: Interrupt line to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * @handler: Function to be called when the IRQ occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) * Primary handler for threaded interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) * If NULL and thread_fn != NULL the default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) * primary handler is installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * @thread_fn: Function called from the irq handler thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) * If NULL, no irq thread is created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) * @irqflags: Interrupt type flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) * @devname: An ascii name for the claiming device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * @dev_id: A cookie passed back to the handler function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * This call allocates interrupt resources and enables the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * interrupt line and IRQ handling. From the point this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * call is made your handler function may be invoked. Since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * your handler function must clear any interrupt the board
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * raises, you must take care both to initialise your hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * and to set up the interrupt handler in the right order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * If you want to set up a threaded irq handler for your device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * then you need to supply @handler and @thread_fn. @handler is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * still called in hard interrupt context and has to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * whether the interrupt originates from the device. If yes it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * needs to disable the interrupt on the device and return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * IRQ_WAKE_THREAD which will wake up the handler thread and run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * @thread_fn. This split handler design is necessary to support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * shared interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) * Dev_id must be globally unique. Normally the address of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * device data structure is used as the cookie. Since the handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * receives this value it makes sense to use it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) * If your interrupt is shared you must pass a non NULL dev_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) * as this is required when freeing the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) * Flags:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) * IRQF_SHARED Interrupt is shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * IRQF_TRIGGER_* Specify active edge(s) or level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) int request_threaded_irq(unsigned int irq, irq_handler_t handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) irq_handler_t thread_fn, unsigned long irqflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) const char *devname, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) struct irqaction *action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (irq == IRQ_NOTCONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) * Sanity-check: shared interrupts must pass in a real dev-ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * otherwise we'll have trouble later trying to figure out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * which interrupt is which (messes up the interrupt freeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) * logic etc).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) * it cannot be set along with IRQF_NO_SUSPEND.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (((irqflags & IRQF_SHARED) && !dev_id) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (!irq_settings_can_request(desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) WARN_ON(irq_settings_is_per_cpu_devid(desc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (!handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (!thread_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) handler = irq_default_primary_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) if (!action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) action->handler = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) action->thread_fn = thread_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) action->flags = irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) action->name = devname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) action->dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) retval = irq_chip_pm_get(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) kfree(action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) retval = __setup_irq(irq, desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) irq_chip_pm_put(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) kfree(action->secondary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) kfree(action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) #ifdef CONFIG_DEBUG_SHIRQ_FIXME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (!retval && (irqflags & IRQF_SHARED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) * It's a shared IRQ -- the driver ought to be prepared for it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) * to happen immediately, so let's make sure....
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) * We disable the irq to make sure that a 'real' IRQ doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) * run in parallel with our fake.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) disable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) handler(irq, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) enable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) EXPORT_SYMBOL(request_threaded_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) * request_any_context_irq - allocate an interrupt line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) * @irq: Interrupt line to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) * @handler: Function to be called when the IRQ occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * Threaded handler for threaded interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) * @flags: Interrupt type flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) * @name: An ascii name for the claiming device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) * @dev_id: A cookie passed back to the handler function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) * This call allocates interrupt resources and enables the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) * interrupt line and IRQ handling. It selects either a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) * hardirq or threaded handling method depending on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * On failure, it returns a negative value. On success,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) int request_any_context_irq(unsigned int irq, irq_handler_t handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) unsigned long flags, const char *name, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) if (irq == IRQ_NOTCONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (irq_settings_is_nested_thread(desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) ret = request_threaded_irq(irq, NULL, handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) flags, name, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) return !ret ? IRQC_IS_NESTED : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) ret = request_irq(irq, handler, flags, name, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) return !ret ? IRQC_IS_HARDIRQ : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) EXPORT_SYMBOL_GPL(request_any_context_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * request_nmi - allocate an interrupt line for NMI delivery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) * @irq: Interrupt line to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) * @handler: Function to be called when the IRQ occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) * Threaded handler for threaded interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) * @irqflags: Interrupt type flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) * @name: An ascii name for the claiming device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) * @dev_id: A cookie passed back to the handler function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) * This call allocates interrupt resources and enables the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * interrupt line and IRQ handling. It sets up the IRQ line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * to be handled as an NMI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) * An interrupt line delivering NMIs cannot be shared and IRQ handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) * cannot be threaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) * Interrupt lines requested for NMI delivering must produce per cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) * interrupts and have auto enabling setting disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) * Dev_id must be globally unique. Normally the address of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) * device data structure is used as the cookie. Since the handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) * receives this value it makes sense to use it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * If the interrupt line cannot be used to deliver NMIs, function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * will fail and return a negative value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) int request_nmi(unsigned int irq, irq_handler_t handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) unsigned long irqflags, const char *name, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) struct irqaction *action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (irq == IRQ_NOTCONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) /* NMI cannot be shared, used for Polling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if (!(irqflags & IRQF_PERCPU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (!handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (!desc || irq_settings_can_autoenable(desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) !irq_settings_can_request(desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) !irq_supports_nmi(desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) if (!action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) action->handler = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) action->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) action->dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) retval = irq_chip_pm_get(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) retval = __setup_irq(irq, desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) goto err_irq_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) raw_spin_lock_irqsave(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) /* Setup NMI state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) desc->istate |= IRQS_NMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) retval = irq_nmi_setup(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) __cleanup_nmi(irq, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) err_irq_setup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) irq_chip_pm_put(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) kfree(action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) void enable_percpu_irq(unsigned int irq, unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * If the trigger type is not specified by the caller, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) * use the default for this interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) type &= IRQ_TYPE_SENSE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) if (type == IRQ_TYPE_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) type = irqd_get_trigger_type(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) if (type != IRQ_TYPE_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) ret = __irq_set_trigger(desc, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) WARN(1, "failed to set type for IRQ%d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) irq_percpu_enable(desc, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) irq_put_desc_unlock(desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) EXPORT_SYMBOL_GPL(enable_percpu_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) void enable_percpu_nmi(unsigned int irq, unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) enable_percpu_irq(irq, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * @irq: Linux irq number to check for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) * Must be called from a non migratable context. Returns the enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) * state of a per cpu interrupt on the current cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) bool irq_percpu_is_enabled(unsigned int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) bool is_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) irq_put_desc_unlock(desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) return is_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) void disable_percpu_irq(unsigned int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) irq_percpu_disable(desc, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) irq_put_desc_unlock(desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) EXPORT_SYMBOL_GPL(disable_percpu_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) void disable_percpu_nmi(unsigned int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) disable_percpu_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) * Internal function to unregister a percpu irqaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) struct irq_desc *desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) struct irqaction *action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) raw_spin_lock_irqsave(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) action = desc->action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) if (!action || action->percpu_dev_id != dev_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) WARN(1, "Trying to free already-free IRQ %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (!cpumask_empty(desc->percpu_enabled)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) irq, cpumask_first(desc->percpu_enabled));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) /* Found it - now remove it from the list of entries: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) desc->action = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) desc->istate &= ~IRQS_NMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) unregister_handler_proc(irq, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) irq_chip_pm_put(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) module_put(desc->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) return action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) * remove_percpu_irq - free a per-cpu interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) * @irq: Interrupt line to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) * @act: irqaction for the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) * Used to remove interrupts statically set up by the early boot process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) void remove_percpu_irq(unsigned int irq, struct irqaction *act)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct irq_desc *desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) if (desc && irq_settings_is_per_cpu_devid(desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) __free_percpu_irq(irq, act->percpu_dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) * free_percpu_irq - free an interrupt allocated with request_percpu_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) * @irq: Interrupt line to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) * @dev_id: Device identity to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) * Remove a percpu interrupt handler. The handler is removed, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) * the interrupt line is not disabled; the line must be disabled on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) * each CPU before calling this function. The function does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) * return until any executing interrupts for this IRQ have completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) * This function must not be called from interrupt context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) struct irq_desc *desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (!desc || !irq_settings_is_per_cpu_devid(desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) chip_bus_lock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) kfree(__free_percpu_irq(irq, dev_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) chip_bus_sync_unlock(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) EXPORT_SYMBOL_GPL(free_percpu_irq);
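
/*
 * Illustrative teardown sketch (not part of this file, names hypothetical):
 * a driver that obtained the line with request_percpu_irq() disables it on
 * every CPU, typically from a CPU hotplug teardown callback running on that
 * CPU, before freeing it once.
 *
 *	static int example_dying_cpu(unsigned int cpu)
 *	{
 *		disable_percpu_irq(example_irq);	// runs on @cpu
 *		return 0;
 *	}
 *
 *	static void example_remove(void)
 *	{
 *		// After every CPU has run example_dying_cpu():
 *		free_percpu_irq(example_irq, example_dev_id);
 *		free_percpu(example_dev_id);
 *	}
 */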
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) struct irq_desc *desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (!desc || !irq_settings_is_per_cpu_devid(desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) if (WARN_ON(!(desc->istate & IRQS_NMI)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) kfree(__free_percpu_irq(irq, dev_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) * setup_percpu_irq - setup a per-cpu interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) * @irq: Interrupt line to setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) * @act: irqaction for the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) * Used to statically set up per-cpu interrupts in the early boot process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) int setup_percpu_irq(unsigned int irq, struct irqaction *act)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) struct irq_desc *desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) if (!desc || !irq_settings_is_per_cpu_devid(desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) retval = irq_chip_pm_get(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) retval = __setup_irq(irq, desc, act);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) irq_chip_pm_put(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) * __request_percpu_irq - allocate a percpu interrupt line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) * @irq: Interrupt line to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) * @handler: Function to be called when the IRQ occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) * @flags: Interrupt type flags (IRQF_TIMER only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) * @devname: An ascii name for the claiming device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) * @dev_id: A percpu cookie passed back to the handler function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) * This call allocates interrupt resources and enables the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * interrupt on the local CPU. If the interrupt is supposed to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) * enabled on other CPUs, this has to be done on each of those CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) * by calling enable_percpu_irq().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) * Dev_id must be globally unique. It is a per-cpu variable, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) * the handler gets called with the interrupted CPU's instance of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) * that variable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) unsigned long flags, const char *devname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) void __percpu *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) struct irqaction *action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (!dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (!desc || !irq_settings_can_request(desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) !irq_settings_is_per_cpu_devid(desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) if (flags && flags != IRQF_TIMER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (!action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) action->handler = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) action->name = devname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) action->percpu_dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) retval = irq_chip_pm_get(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) kfree(action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) retval = __setup_irq(irq, desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) irq_chip_pm_put(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) kfree(action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) EXPORT_SYMBOL_GPL(__request_percpu_irq);
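
/*
 * Illustrative usage sketch (not part of this file, all names hypothetical):
 * the cookie is a percpu allocation, the handler receives this CPU's
 * instance of it, and each CPU that should receive the interrupt enables
 * the line locally with enable_percpu_irq().
 *
 *	static unsigned int __percpu *example_count;
 *
 *	static irqreturn_t example_handler(int irq, void *dev_id)
 *	{
 *		unsigned int *count = dev_id;	// this CPU's instance
 *
 *		(*count)++;
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int example_request(unsigned int irq)
 *	{
 *		int ret;
 *
 *		example_count = alloc_percpu(unsigned int);
 *		if (!example_count)
 *			return -ENOMEM;
 *
 *		ret = request_percpu_irq(irq, example_handler, "example",
 *					 example_count);
 *		if (ret) {
 *			free_percpu(example_count);
 *			return ret;
 *		}
 *
 *		// On each CPU that should receive it, e.g. from a CPU
 *		// hotplug startup callback running on that CPU:
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);
 *		return 0;
 *	}
 */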
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) * @irq: Interrupt line to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) * @handler: Function to be called when the IRQ occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) * @name: An ascii name for the claiming device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) * @dev_id: A percpu cookie passed back to the handler function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) * have to be set up on each CPU by calling prepare_percpu_nmi() before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) * being enabled on the same CPU by using enable_percpu_nmi().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) * Dev_id must be globally unique. It is a per-cpu variable, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) * the handler gets called with the interrupted CPU's instance of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) * that variable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) * Interrupt lines requested for NMI delivery must have their auto-enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) * setting disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) * If the interrupt line cannot be used to deliver NMIs, the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) * will fail and return a negative value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) const char *name, void __percpu *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) struct irqaction *action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) if (!handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) if (!desc || !irq_settings_can_request(desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) !irq_settings_is_per_cpu_devid(desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) irq_settings_can_autoenable(desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) !irq_supports_nmi(desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) /* The line cannot already be NMI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (desc->istate & IRQS_NMI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) if (!action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) action->handler = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) | IRQF_NOBALANCING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) action->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) action->percpu_dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) retval = irq_chip_pm_get(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) retval = __setup_irq(irq, desc, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) goto err_irq_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) raw_spin_lock_irqsave(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) desc->istate |= IRQS_NMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) raw_spin_unlock_irqrestore(&desc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) err_irq_setup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) irq_chip_pm_put(&desc->irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) kfree(action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
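
/*
 * Illustrative NMI lifecycle sketch (not part of this file, names
 * hypothetical): once request_percpu_nmi() has succeeded, each CPU that
 * should receive the NMI prepares and enables it locally, from
 * non-preemptible context, typically a CPU hotplug "starting" callback.
 *
 *	static int example_nmi_starting_cpu(unsigned int cpu)
 *	{
 *		int ret;
 *
 *		// Runs on @cpu with preemption disabled.
 *		ret = prepare_percpu_nmi(example_irq);
 *		if (ret)
 *			return ret;
 *		enable_percpu_nmi(example_irq, IRQ_TYPE_NONE);
 *		return 0;
 *	}
 */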
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) * prepare_percpu_nmi - performs CPU local setup for NMI delivery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) * @irq: Interrupt line to prepare for NMI delivery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) * This call prepares an interrupt line to deliver NMI on the current CPU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) * before that interrupt line gets enabled with enable_percpu_nmi().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) * As a CPU local operation, this should be called from non-preemptible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) * context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) * If the interrupt line cannot be used to deliver NMIs, the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) * will fail and return a negative value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) int prepare_percpu_nmi(unsigned int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) WARN_ON(preemptible());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) desc = irq_get_desc_lock(irq, &flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) IRQ_GET_DESC_CHECK_PERCPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (WARN(!(desc->istate & IRQS_NMI),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) irq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) ret = irq_nmi_setup(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) pr_err("Failed to setup NMI delivery: irq %u\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) irq_put_desc_unlock(desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) * teardown_percpu_nmi - undoes NMI setup of IRQ line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) * @irq: Interrupt line from which CPU local NMI configuration should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) * removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) * This call undoes the setup done by prepare_percpu_nmi().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) * The IRQ line should not be enabled for the current CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) * As a CPU local operation, this should be called from non-preemptible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) * context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) void teardown_percpu_nmi(unsigned int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) WARN_ON(preemptible());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) desc = irq_get_desc_lock(irq, &flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) IRQ_GET_DESC_CHECK_PERCPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) if (WARN_ON(!(desc->istate & IRQS_NMI)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) irq_nmi_teardown(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) irq_put_desc_unlock(desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) }
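
/*
 * Illustrative teardown sketch (not part of this file, names hypothetical),
 * mirroring the prepare/enable sequence: each CPU disables and tears down
 * the NMI locally before the line is freed once, globally.
 *
 *	static int example_nmi_dying_cpu(unsigned int cpu)
 *	{
 *		// Runs on @cpu with preemption disabled.
 *		disable_percpu_nmi(example_irq);
 *		teardown_percpu_nmi(example_irq);
 *		return 0;
 *	}
 *
 *	// Once every CPU has run the dying callback:
 *	//	free_percpu_nmi(example_irq, example_dev_id);
 */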
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) bool *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) struct irq_chip *chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) chip = irq_data_get_irq_chip(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) if (WARN_ON_ONCE(!chip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) if (chip->irq_get_irqchip_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) data = data->parent_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) } while (data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) err = chip->irq_get_irqchip_state(data, which, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) * irq_get_irqchip_state - returns the irqchip state of an interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) * @irq: Interrupt line that is forwarded to a VM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) * @which: One of IRQCHIP_STATE_* the caller wants to know about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) * @state: a pointer to a boolean where the state is to be stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) * This call snapshots the internal irqchip state of an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) * interrupt, returning into @state the bit corresponding to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) * state @which.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * This function should be called with preemption disabled if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) * interrupt controller has per-cpu registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) bool *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) struct irq_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) desc = irq_get_desc_buslock(irq, &flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) data = irq_desc_get_irq_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) err = __irq_get_irqchip_state(data, which, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) irq_put_desc_busunlock(desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
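
/*
 * Illustrative sketch (not part of this file, names hypothetical): checking
 * whether a forwarded interrupt is still pending at the irqchip level.
 *
 *	bool pending;
 *	int err;
 *
 *	err = irq_get_irqchip_state(example_irq, IRQCHIP_STATE_PENDING,
 *				    &pending);
 *	if (!err && pending)
 *		pr_debug("irq %u still pending in hardware\n", example_irq);
 */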
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) * irq_set_irqchip_state - set the state of a forwarded interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) * @irq: Interrupt line that is forwarded to a VM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) * @which: State to be restored (one of IRQCHIP_STATE_*)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) * @val: Value corresponding to @which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) * This call sets the internal irqchip state of an interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) * depending on the value of @which.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) * This function should be called with preemption disabled if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) * interrupt controller has per-cpu registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) bool val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) struct irq_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) struct irq_chip *chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) desc = irq_get_desc_buslock(irq, &flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) data = irq_desc_get_irq_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) chip = irq_data_get_irq_chip(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) if (WARN_ON_ONCE(!chip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) if (chip->irq_set_irqchip_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) data = data->parent_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) } while (data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) err = chip->irq_set_irqchip_state(data, which, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) irq_put_desc_busunlock(desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
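
/*
 * Illustrative sketch (not part of this file, names hypothetical): restoring
 * a previously saved pending state, e.g. when handing a forwarded interrupt
 * back to a guest.
 *
 *	err = irq_set_irqchip_state(example_irq, IRQCHIP_STATE_PENDING,
 *				    saved_pending);
 *	if (err)
 *		pr_warn("failed to restore pending state for irq %u\n",
 *			example_irq);
 */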