// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/vmalloc.h>

#include <asm/debugfs.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#undef DEBUG_FLUSH
#undef DEBUG_ALL

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while (0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA (Thread Interrupt Management Area) exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs all use the same logical irq number */
static u32 xive_ipi_irq;
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)

/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set
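 *
 * Example (assuming a hypothetical 4-entry queue, so q->msk == 3):
 * with q->toggle == 0, an entry whose bit 31 is set reads as valid.
 * After consuming index 3 the consumer wraps back to index 0 and
 * flips q->toggle to 1, so leftover entries from the previous lap
 * (bit 31 still set) now compare equal to the toggle and read as
 * empty.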
 */
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* If consuming from the queue ... */
	if (!just_peek) {
		/* Next entry */
		q->idx = (q->idx + 1) & q->msk;

		/* Wrap around: flip valid toggle */
		if (q->idx == 0)
			q->toggle ^= 1;
	}
	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
}

/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, return the most favored pending
 * interrupt if any but don't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" the "pending_count" of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the number of interrupts
 * targeting a queue. When an interrupt is moved away from
 * a queue, we only decrement that queue count once the queue
 * has been observed empty to avoid races.
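 *
 * Illustration: "pending_prio" is a bitmap with bit N set when
 * priority N may have queued entries, so the ffs() - 1 below yields
 * the most favored (numerically lowest) pending priority first;
 * e.g. pending_prio == 0x05 scans priority 0, then priority 2.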
 */
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio = 0;

	/* Find highest pending priority */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		/* Try to fetch */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something? That's it */
		if (irq) {
			if (just_peek || irq_to_desc(irq))
				break;
			/*
			 * We should never get here; if we do then we must
			 * have failed to synchronize the interrupt properly
			 * when shutting it down.
			 */
			pr_crit("got interrupt %d without descriptor, dropping\n",
				irq);
			WARN_ON(1);
			continue;
		}

		/* Clear pending bits */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See description of
		 * xive_dec_target_count()
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);
			if (p) {
				WARN_ON(p > atomic_read(&q->count));
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If nothing was found, set CPPR to 0xff */
	if (irq == 0)
		prio = 0xff;

	/* Update HW CPPR to match if necessary */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
		xc->cppr = prio;
		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
	}

	return irq;
}

/*
 * This is used to perform the magic loads from an ESB
 * described in xive-regs.h
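 *
 * A load from ESB offset XIVE_ESB_SET_PQ_xx atomically sets the PQ
 * bits to "xx" and returns the previous state in the low two bits
 * (XIVE_ESB_VAL_P / XIVE_ESB_VAL_Q); XIVE_ESB_GET reads the current
 * state without changing it.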
 */
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		offset |= XIVE_ESB_LD_ST_MO;

	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;
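	/*
	 * (The workaround above ORs in the offset shifted left by 4:
	 * e.g. a hypothetical offset of 0x800 would become 0x8800,
	 * presumably matching the shifted ESB page layout of the
	 * affected hardware.)
	 */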

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
	else
		val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}

static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
	else
		out_be64(xd->eoi_mmio + offset, data);
}

#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
	u32 i0, i1, idx;

	if (!q->qpage)
		return;
	idx = q->idx;
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
		    q->idx, q->toggle, i0, i1);
}

notrace void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("CPU %d:", cpu);
	if (xc) {
		xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);

			xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
				    val & XIVE_ESB_VAL_P ? 'P' : '-',
				    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
		}
#endif
		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
	}
	xmon_printf("\n");
}

static struct irq_data *xive_get_irq_data(u32 hw_irq)
{
	unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);

	return irq ? irq_get_irq_data(irq) : NULL;
}

int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return rc;
	}

	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		    hw_irq, target, prio, lirq);

	if (!d)
		d = xive_get_irq_data(hw_irq);

	if (d) {
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
		u64 val = xive_esb_read(xd, XIVE_ESB_GET);

		xmon_printf("flags=%c%c%c PQ=%c%c",
			    xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
			    val & XIVE_ESB_VAL_P ? 'P' : '-',
			    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	}

	xmon_printf("\n");
	return 0;
}

#endif /* CONFIG_XMON */

static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupts.
	 *
	 * In the future, if we have a way to differentiate a first
	 * entry (on HW interrupt) from a replay triggered by EOI,
	 * we could skip this on replays unless soft-masking tells us
	 * that a new HW interrupt has occurred.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}

/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}

/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type
 */
static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
	xd->stale_p = false;
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		/*
		 * The FW told us to call it. This happens for some
		 * interrupt sources that need additional HW whacking
		 * beyond the ESB manipulation. For example LPC interrupts
		 * on P9 DD1.0 needed a latch to be cleared in the LPC bridge
		 * itself. The firmware will take care of it.
		 */
		if (WARN_ON_ONCE(!xive_ops->eoi))
			return;
		xive_ops->eoi(hw_irq);
	} else {
		u8 eoi_val;

		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software
		 *
		 * For LSIs the HW EOI cycle is used rather than PQ bits,
		 * as they are automatically re-triggered in HW when still
		 * pending.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
		else {
			eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
			DBG_VERBOSE("eoi_val=%x\n", eoi_val);

			/* Re-trigger if needed */
			if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
				out_be64(xd->trig_mmio, 0);
		}
	}
}

/* irq_chip eoi callback, called with irq descriptor lock held */
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
	    !(xd->flags & XIVE_IRQ_NO_EOI))
		xive_do_source_eoi(irqd_to_hwirq(d), xd);
	else
		xd->stale_p = true;

	/*
	 * Clear saved_p to indicate that it's no longer occupying
	 * a queue slot on the target queue
	 */
	xd->saved_p = false;

	/* Check for more work in the queue */
	xive_do_queue_eoi(xc);
}

/*
 * Helper used to mask and unmask an interrupt source. This
 * is only called for normal interrupts that do not require
 * masking/unmasking via firmware.
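 *
 * Rough PQ summary, as derived from the logic below: 00 = enabled
 * and idle, 01 = masked (a new event only latches Q, no notification
 * is sent), 10 = P set, i.e. an event was notified and may still
 * occupy a slot in a queue.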
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,
				    bool mask)
{
	u64 val;

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We need to make sure we don't re-enable it until it
	 * has been fetched from that queue and EOId. We keep
	 * a copy of that P state and use it to restore the
	 * ESB accordingly on unmask.
	 */
	if (mask) {
		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
		if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
			xd->saved_p = true;
		xd->stale_p = false;
	} else if (xd->saved_p) {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		xd->saved_p = false;
	} else {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
		xd->stale_p = false;
	}
}

/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	int max;

	/*
	 * Calculate max number of interrupts in that queue.
	 *
	 * We leave a gap of 1 just in case...
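	 * (presumably the classic ring-buffer trick: keeping one
	 * slot free avoids any ambiguity between a completely full
	 * and a completely empty queue).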
	 */
	max = (q->msk + 1) - 1;
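
	/*
	 * atomic_add_unless() refuses the increment once q->count
	 * reaches "max", so a false result here means this target's
	 * queue is already fully booked.
	 */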
	return !!atomic_add_unless(&q->count, 1, max);
}

/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" later when that CPU observes
 * the queue to be empty.
 */
static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	if (WARN_ON(cpu < 0 || !xc)) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}

	/*
	 * We increment the "pending count" which will be used
	 * to decrement the target queue count whenever it's next
	 * processed and found empty. This ensures that we don't
	 * decrement while we still have the interrupt there
	 * occupying a slot.
	 */
	atomic_inc(&q->pending_count);
}

/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
				    unsigned int fuzz)
{
	int cpu, first, num, i;

	/* Pick up a starting point CPU in the mask based on fuzz */
	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
	first = fuzz % num;

	/* Locate it */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	/* Sanity check */
	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember first one to handle wrap-around */
	first = cpu;

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */
	do {
		/*
		 * We re-check online as the fallback case passes us
		 * an untested affinity mask
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);
		/* Wrap around */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	} while (cpu != first);

	return -1;
}

/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	static unsigned int fuzz;
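	/* "fuzz" rotates the start offset so picks spread across CPUs */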
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * online CPUs on the same chip as the source and find a
	 * target in there
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
		fuzz--;
	}
	/* No chip IDs, fall back to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}

static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	xd->saved_p = false;
	xd->stale_p = false;
	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

#ifdef CONFIG_PCI_MSI
	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	if (irq_data_get_msi_desc(d))
		pci_msi_unmask_irq(d);
#endif

	/* Pick a target */
	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET) {
		/* Try again breaking affinity */
		target = xive_pick_irq_target(d, cpu_online_mask);
		if (target == XIVE_INVALID_TARGET)
			return -ENXIO;
		pr_warn("irq %d started with broken affinity\n", d->irq);
	}

	/* Sanity check */
	if (WARN_ON(target == XIVE_INVALID_TARGET ||
		    target >= nr_cpu_ids))
		target = smp_processor_id();

	xd->target = target;

	/*
	 * Configure the logical number to be the Linux IRQ number
	 * and set the target queue
	 */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc)
		return rc;

	/* Unmask the ESB */
	xive_do_source_set_mask(xd, false);

	return 0;
}

/* called with irq descriptor lock held */
static void xive_irq_shutdown(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
		return;

	/* Mask the interrupt at the source */
	xive_do_source_set_mask(xd, true);

	/*
	 * Mask the interrupt in HW in the IVT/EAS and set the number
	 * to be the "bad" IRQ number
	 */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),
				0xff, XIVE_BAD_IRQ);

	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
}

static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9: for
	 * these, we call FW to set the mask. The problems might
	 * be fixed by P9 DD2.0; if that is the case, firmware
	 * will no longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					xive_irq_priority, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9: for
	 * these, we call OPAL to set the mask. The problems might
	 * be fixed by P9 DD2.0; if that is the case, firmware
	 * will no longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					0xff, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, true);
}

static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;
	int rc = 0;

	pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

	/* Is this valid? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
		return -EINVAL;

	/* Don't do anything if the interrupt isn't started */
	if (!irqd_is_started(d))
		return IRQ_SET_MASK_OK;

	/*
	 * If the existing target is already in the new mask, and is
	 * online, then do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_devel(" target: 0x%x\n", target);
	xd->target = target;

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}

static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than setting
	 * the corresponding descriptor bits, but those will in turn affect
	 * the resend function when re-enabling an edge interrupt.
	 *
	 * We set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree so
	 * this check might have to move into an optional backend call
	 * that is specific to the native backend
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_MASK_OK_NOCOPY;
}

static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * Note: We pass "0" to the hw_irq argument in order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * avoid calling into the backend EOI code which we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * want to do in the case of a re-trigger. Backends typically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * only do EOI for LSIs anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) xive_do_source_eoi(0, xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
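
/*
 * Summary of the ESB PQ states as used in this file (a reader's sketch,
 * inferred from how the accessors below are used, not an authoritative
 * hardware spec):
 *
 *   PQ=00: source enabled, the next trigger is forwarded to a queue
 *   PQ=01: source "off", further triggers are discarded (soft-mask)
 *   PQ=10: P set, an occurrence is in flight to or sitting in a queue
 *   PQ=11: P set plus a further trigger latched in Q
 *
 * The XIVE_ESB_SET_PQ_* loads performed via xive_esb_read() atomically
 * return the previous PQ state while installing the new one.
 */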
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * Caller holds the irq descriptor lock, so this won't be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * concurrently with xive_get_irqchip_state on the same interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) u8 pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * We only support this on interrupts that do not require
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * firmware calls for masking and unmasking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * This is called by KVM with state non-NULL for enabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * pass-through or NULL for disabling it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) irqd_set_forwarded_to_vcpu(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /* Set it to PQ=10 state to prevent further sends */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (!xd->stale_p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) xd->stale_p = !xd->saved_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /* No target? Nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (xd->target == XIVE_INVALID_TARGET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * An untargeted interrupt should also have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * masked at the source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) WARN_ON(xd->saved_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * If P was set, adjust state to PQ=11 to indicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * that a resend is needed for the interrupt to reach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * the guest. Also remember the value of P.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * This also tells us that it's in flight to a host queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * or has already been fetched but hasn't been EOIed yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * by the host. Thus it's potentially using up a host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * queue slot. This is important to know because as long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * as this is the case, we must not hard-unmask it when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * "returning" that interrupt to the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * This saved_p is cleared by the host EOI, when we know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * for sure the queue slot is no longer in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (xd->saved_p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * Sync the XIVE source HW to ensure the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * has gone through the EAS before we change its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * target to the guest. That should guarantee us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * that we *will* eventually get an EOI for it on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * the host. Otherwise there would be a small window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * for P to be seen here but the interrupt going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * to the guest queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (xive_ops->sync_source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) xive_ops->sync_source(hw_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) irqd_clr_forwarded_to_vcpu(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* No host target? Hard mask and return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (xd->target == XIVE_INVALID_TARGET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) xive_do_source_set_mask(xd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * Sync the XIVE source HW to ensure the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * has gone through the EAS before we change its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * target to the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (xive_ops->sync_source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) xive_ops->sync_source(hw_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * By convention we are called with the interrupt in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * a PQ=10 or PQ=11 state, i.e., it won't fire and will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * have latched in Q whether there's a pending HW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * interrupt or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * First reconfigure the target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) rc = xive_ops->configure_irq(hw_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) get_hard_smp_processor_id(xd->target),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) xive_irq_priority, d->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * Then if saved_p is not set, effectively re-enable the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * interrupt with an EOI. If it is set, we know there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * still a message in a host queue somewhere that will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * EOIed eventually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * Note: We don't check irqd_irq_disabled(). Effectively,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * we *will* let the irq get through even if masked if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * HW is still firing it in order to deal with the whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * saved_p business properly. If the interrupt triggers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * while masked, the generic code will re-mask it anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (!xd->saved_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) xive_do_source_eoi(hw_irq, xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /* Called with irq descriptor lock held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) static int xive_get_irqchip_state(struct irq_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) enum irqchip_irq_state which, bool *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) u8 pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) switch (which) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) case IRQCHIP_STATE_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) pq = xive_esb_read(xd, XIVE_ESB_GET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * An ESB value of all 1's means we couldn't get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * the PQ state of the interrupt through MMIO. It may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * happen, for example when querying a PHB interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * while the PHB is in an error state. We consider the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * interrupt to be inactive in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) (xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) !irqd_irq_disabled(data)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static struct irq_chip xive_irq_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) .name = "XIVE-IRQ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) .irq_startup = xive_irq_startup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) .irq_shutdown = xive_irq_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) .irq_eoi = xive_irq_eoi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) .irq_mask = xive_irq_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) .irq_unmask = xive_irq_unmask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) .irq_set_affinity = xive_irq_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) .irq_set_type = xive_irq_set_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) .irq_retrigger = xive_irq_retrigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) .irq_get_irqchip_state = xive_get_irqchip_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) bool is_xive_irq(struct irq_chip *chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return chip == &xive_irq_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) EXPORT_SYMBOL_GPL(is_xive_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
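/*
 * Free the ESB MMIO mappings attached to an interrupt's xive_irq_data.
 */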
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) void xive_cleanup_irq_data(struct xive_irq_data *xd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (xd->eoi_mmio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /* iounmap() tears down both the PTEs and the vmap area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) iounmap(xd->eoi_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (xd->eoi_mmio == xd->trig_mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) xd->trig_mmio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) xd->eoi_mmio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (xd->trig_mmio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) iounmap(xd->trig_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) xd->trig_mmio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
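/*
 * Allocate the xive_irq_data for a newly mapped interrupt, have the
 * backend populate it, attach it as the virq's handler data, and leave
 * the source in the "off" (PQ=01) state until it is started up.
 */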
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct xive_irq_data *xd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (!xd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) rc = xive_ops->populate_irq_data(hw, xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) kfree(xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) xd->target = XIVE_INVALID_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) irq_set_handler_data(virq, xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * Turn OFF by default the interrupt being mapped. A side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * effect of this check is that it maps the ESB page of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * interrupt in the Linux address space. This prevents page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * fault issues in the crash handler, which masks all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static void xive_irq_free_data(unsigned int virq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct xive_irq_data *xd = irq_get_handler_data(virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (!xd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) irq_set_handler_data(virq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) xive_cleanup_irq_data(xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) kfree(xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
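/*
 * Fire an IPI at the target CPU by writing to the trigger page of its
 * per-CPU IPI source.
 */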
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static void xive_cause_ipi(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) struct xive_cpu *xc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct xive_irq_data *xd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) xc = per_cpu(xive_cpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) smp_processor_id(), cpu, xc->hw_ipi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) xd = &xc->ipi_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (WARN_ON(!xd->trig_mmio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) out_be64(xd->trig_mmio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
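/*
 * All CPUs share a single Linux interrupt for IPIs; the actual message
 * type is demultiplexed from the per-CPU mailbox by smp_ipi_demux().
 */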
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return smp_ipi_demux();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) static void xive_ipi_eoi(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) struct xive_cpu *xc = __this_cpu_read(xive_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /* Handle possible race with unplug and drop stale IPIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (!xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) xive_do_queue_eoi(xc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static void xive_ipi_do_nothing(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * Nothing to do, we never mask/unmask IPIs, but the callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * has to exist for the struct irq_chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static struct irq_chip xive_ipi_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) .name = "XIVE-IPI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) .irq_eoi = xive_ipi_eoi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) .irq_mask = xive_ipi_do_nothing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .irq_unmask = xive_ipi_do_nothing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static void __init xive_request_ipi(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) unsigned int virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * Initialization failed; move on, we might manage to reach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * the point where we display our errors before the system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * falls apart.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (!xive_irq_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /* Initialize it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) virq = irq_create_mapping(xive_irq_domain, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) xive_ipi_irq = virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) WARN_ON(request_irq(virq, xive_muxed_ipi_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static int xive_setup_cpu_ipi(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) struct xive_cpu *xc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) pr_debug("Setting up IPI for CPU %d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) xc = per_cpu(xive_cpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* Check if we are already setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (xc->hw_ipi != XIVE_BAD_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) /* Grab an IPI from the backend, this will populate xc->hw_ipi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (xive_ops->get_ipi(cpu, xc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * Populate the IRQ data in the xive_cpu structure and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * configure the HW / enable the IPIs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) pr_err("Failed to populate IPI data on CPU %d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) rc = xive_ops->configure_irq(xc->hw_ipi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) get_hard_smp_processor_id(cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) xive_irq_priority, xive_ipi_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) pr_err("Failed to map IPI CPU %d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /* Unmask it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) xive_do_source_set_mask(&xc->ipi_data, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /* Disable the IPI and free the IRQ data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) /* Already cleaned up? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (xc->hw_ipi == XIVE_BAD_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* Mask the IPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) xive_do_source_set_mask(&xc->ipi_data, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * Note: We don't call xive_cleanup_irq_data() to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * the mappings as this is called from an IPI on kexec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * which is not a safe environment to call iounmap()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /* Deconfigure/mask in the backend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 0xff, xive_ipi_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /* Free the IPIs in the backend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) xive_ops->put_ipi(cpu, xc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) void __init xive_smp_probe(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) smp_ops->cause_ipi = xive_cause_ipi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /* Register the IPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) xive_request_ipi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /* Allocate and setup IPI for the boot CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) xive_setup_cpu_ipi(smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) irq_hw_number_t hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * Mark interrupts as edge sensitive by default so that resend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * actually works. Will fix that up below if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) irq_clear_status_flags(virq, IRQ_LEVEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* IPIs are special and come up with HW number 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (hw == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * IPIs are marked per-cpu. We use separate HW interrupts under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * the hood but associated with the same "linux" interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) irq_set_chip_and_handler(virq, &xive_ipi_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) handle_percpu_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) rc = xive_irq_alloc_data(virq, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct irq_data *data = irq_get_irq_data(virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) unsigned int hw_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /* XXX Assign BAD number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) hw_irq = (unsigned int)irqd_to_hwirq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (hw_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) xive_irq_free_data(virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
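/*
 * Illustrative encoding (hypothetical values): an "interrupts" property
 * of <0x1234 1> is translated below to hwirq 0x1234 with
 * IRQ_TYPE_LEVEL_LOW, since the LSB of the second cell is set.
 */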
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) const u32 *intspec, unsigned int intsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) irq_hw_number_t *out_hwirq, unsigned int *out_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) *out_hwirq = intspec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * If intsize is at least 2, we look for the type in the second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * cell; we assume the LSB indicates a level interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (intsize > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (intspec[1] & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) *out_flags = IRQ_TYPE_LEVEL_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) *out_flags = IRQ_TYPE_EDGE_RISING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) *out_flags = IRQ_TYPE_LEVEL_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) enum irq_domain_bus_token bus_token)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return xive_ops->match(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static const struct irq_domain_ops xive_irq_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) .match = xive_irq_domain_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) .map = xive_irq_domain_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) .unmap = xive_irq_domain_unmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) .xlate = xive_irq_domain_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) static void __init xive_init_host(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) &xive_irq_domain_ops, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (WARN_ON(xive_irq_domain == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) irq_set_default_host(xive_irq_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (xc->queue[xive_irq_priority].qpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /* We set up one queue for now, with a 64k page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (!xc->queue[xive_irq_priority].qpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static int xive_prepare_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) struct xive_cpu *xc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) xc = per_cpu(xive_cpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (!xc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) xc = kzalloc_node(sizeof(struct xive_cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) GFP_KERNEL, cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (!xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) np = of_get_cpu_node(cpu, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) xc->chip_id = of_get_ibm_chip_id(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) xc->hw_ipi = XIVE_BAD_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) per_cpu(xive_cpu, cpu) = xc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /* Set up EQs if not already done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return xive_setup_cpu_queues(cpu, xc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static void xive_setup_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct xive_cpu *xc = __this_cpu_read(xive_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /* The backend might have additional things to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (xive_ops->setup_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) xive_ops->setup_cpu(smp_processor_id(), xc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
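/*
 * CPPR convention: numerically lower values are more favored, and a
 * CPU only takes interrupts more favored than its current CPPR, so
 * 0xff accepts everything while 0 blocks everything.
 */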
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /* Set CPPR to 0xff to enable flow of interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) xc->cppr = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) void xive_smp_setup_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) pr_devel("SMP setup CPU %d\n", smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /* This will have already been done on the boot CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (smp_processor_id() != boot_cpuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) xive_setup_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) int xive_smp_prepare_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /* Allocate per-CPU data and queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) rc = xive_prepare_cpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /* Allocate and setup IPI for the new CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) return xive_setup_cpu_ipi(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) u32 irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /* We assume local irqs are disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) WARN_ON(!irqs_disabled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* Check what's already in the CPU queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) while ((irq = xive_scan_interrupts(xc, false)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * We need to re-route that interrupt to its new destination.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * First get and lock the descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) struct irq_desc *desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct irq_data *d = irq_desc_get_irq_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct xive_irq_data *xd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) * Ignore anything that isn't a XIVE irq. Also ignore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) * IPIs, which can simply be dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (d->domain != xive_irq_domain || hw_irq == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * The IRQ should have already been re-routed, it's just a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * stale entry in the old queue, so re-trigger it in order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * to make it reach its new destination.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) #ifdef DEBUG_FLUSH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) cpu, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) raw_spin_lock(&desc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) xd = irq_desc_get_handler_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * Clear saved_p to indicate that it's no longer pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) xd->saved_p = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * For LSIs, we EOI, this will cause a resend if it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * still asserted. Otherwise do an MSI retrigger.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (xd->flags & XIVE_IRQ_FLAG_LSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) xive_do_source_eoi(irqd_to_hwirq(d), xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) xive_irq_retrigger(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) raw_spin_unlock(&desc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) void xive_smp_disable_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct xive_cpu *xc = __this_cpu_read(xive_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) /* Migrate interrupts away from the CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) irq_migrate_all_off_this_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /* Set CPPR to 0 to disable flow of interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) xc->cppr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /* Flush everything still in the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) xive_flush_cpu_queue(cpu, xc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) /* Re-enable CPPR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) xc->cppr = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) void xive_flush_interrupt(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) struct xive_cpu *xc = __this_cpu_read(xive_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /* Called if an interrupt occurs while the CPU is hot unplugged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) xive_flush_cpu_queue(cpu, xc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) #endif /* CONFIG_HOTPLUG_CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) void xive_teardown_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) struct xive_cpu *xc = __this_cpu_read(xive_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /* Set CPPR to 0 to disable flow of interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) xc->cppr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (xive_ops->teardown_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) xive_ops->teardown_cpu(cpu, xc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /* Get rid of IPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) xive_cleanup_cpu_ipi(cpu, xc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /* Disable and free the queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) xive_cleanup_cpu_queues(cpu, xc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) void xive_shutdown(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) xive_ops->shutdown();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
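/*
 * Common core initialization, called by the platform backend (native
 * or sPAPR) with its TIMA area and offset, its ops, and the single
 * priority to use for all interrupts.
 */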
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) u8 max_prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) xive_tima = area;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) xive_tima_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) xive_ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) xive_irq_priority = max_prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) ppc_md.get_irq = xive_get_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) __xive_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) pr_devel("Initializing host..\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) xive_init_host();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) pr_devel("Initializing boot CPU..\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) /* Allocate per-CPU data and queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) xive_prepare_cpu(smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) /* Get ready for interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) xive_setup_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) pr_info("Interrupt handling initialized with %s backend\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) xive_ops->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) pr_info("Using priority %d for all interrupts\n", max_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
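/*
 * Allocate and zero the backing pages for an event queue. queue_shift
 * is log2 of the queue size in bytes; xive_alloc_order() converts it
 * into a page allocation order (e.g. a 64k queue with 4k pages is
 * order 4).
 */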
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) unsigned int alloc_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) struct page *pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) __be32 *qpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) alloc_order = xive_alloc_order(queue_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) qpage = (__be32 *)page_address(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) memset(qpage, 0, 1 << queue_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) return qpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) static int __init xive_off(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) xive_cmdline_disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) __setup("xive=off", xive_off);
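
/*
 * Note: booting with "xive=off" makes the platform code fall back to
 * the legacy XICS interrupt mode where the firmware supports it.
 */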
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) static void xive_debug_show_cpu(struct seq_file *m, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) seq_printf(m, "CPU %d:", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (xc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) val & XIVE_ESB_VAL_P ? 'P' : '-',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) val & XIVE_ESB_VAL_Q ? 'Q' : '-');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) struct xive_q *q = &xc->queue[xive_irq_priority];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) u32 i0, i1, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (q->qpage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) idx = q->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) i0 = be32_to_cpup(q->qpage + idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) idx = (idx + 1) & q->msk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) i1 = be32_to_cpup(q->qpage + idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) seq_printf(m, "EQ idx=%d T=%d %08x %08x ...",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) q->idx, q->toggle, i0, i1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) seq_puts(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) struct irq_chip *chip = irq_data_get_irq_chip(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) u32 target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) u8 prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) u32 lirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) struct xive_irq_data *xd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (!is_xive_irq(chip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) hw_irq, target, prio, lirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) xd = irq_data_get_irq_handler_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) val = xive_esb_read(xd, XIVE_ESB_GET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) seq_printf(m, "flags=%c%c%c PQ=%c%c",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) val & XIVE_ESB_VAL_P ? 'P' : '-',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) val & XIVE_ESB_VAL_Q ? 'Q' : '-');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) seq_puts(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
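/*
 * seq_file "show" for the debugfs "xive" file: the backend state
 * first, then every possible cpu, then every mapped interrupt
 * except the IPIs, which were already covered per-cpu.
 */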
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) static int xive_core_debug_show(struct seq_file *m, void *private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) struct irq_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
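/* Let the backend (native or sPAPR) dump its global state first */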
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (xive_ops->debug_show)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) xive_ops->debug_show(m, private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) xive_debug_show_cpu(m, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) for_each_irq_desc(i, desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) struct irq_data *d = irq_desc_get_irq_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) unsigned int hw_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) hw_irq = (unsigned int)irqd_to_hwirq(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
/* IPIs are special (HW number 0): their state was dumped per-cpu above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (hw_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) xive_debug_show_irq(m, hw_irq, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) DEFINE_SHOW_ATTRIBUTE(xive_core_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
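/*
 * Expose the dump as a root-readable (0400) "xive" file under the
 * powerpc debugfs root, only when XIVE is the active controller.
 * Always returns 0 so a debugfs failure never aborts the caller.
 */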
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) int xive_core_debug_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) if (xive_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) debugfs_create_file("xive", 0400, powerpc_debugfs_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) NULL, &xive_core_debug_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }