// SPDX-License-Identifier: GPL-2.0

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:		Interrupt descriptor to clean up
 * @force_clear:	If set clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case clear the pending move bit.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}

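/**
 * irq_move_masked_irq - Apply a pending affinity change to a masked interrupt
 * @idata:	Interrupt data of the interrupt to move
 *
 * Carries out the affinity change recorded in desc->pending_mask and
 * clears the pending bit. The caller must hold desc->lock and have the
 * interrupt masked; see the comment below on why the masking matters
 * for edge triggered interrupts. If the underlying vector management
 * reports -EBUSY, the move is rescheduled for the next interrupt.
 */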
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	irqd_clr_move_pending(data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid target in the pending mask, perform the
	 * disable, reprogram, enable sequence. This is not particularly
	 * important for level triggered interrupts, but in the edge
	 * triggered case we might otherwise reprogram the routing entry
	 * while an active trigger is coming in, which can cause some
	 * IO-APICs to malfunction.
	 *
	 * For correct operation this depends on the caller masking the
	 * interrupt.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If there is a cleanup pending in the underlying
		 * vector management, reschedule the move for the next
		 * interrupt. Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}

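/**
 * __irq_move_irq - Carry out a pending interrupt move from interrupt context
 * @idata:	Interrupt data of the interrupt to move
 *
 * Masks the interrupt if it is not already masked, lets
 * irq_move_masked_irq() perform the pending affinity change and then
 * restores the previous mask state. Does nothing when the interrupt
 * is disabled.
 */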
void __irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * enabled. The lookup is optimized away when the option is
	 * disabled, so there is no need for an #ifdef here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If the interrupt
	 * was masked elsewhere, e.g. by a threaded handler with ONESHOT
	 * set, unmasking it here would reenable it too early and can
	 * end up in an interrupt storm. So only unmask below when the
	 * masking was done here.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}