// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 * https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number of devices supported by PLICs marked as
 * 'sifive,plic-1.0.0' is 1024, of which device 0 is defined as non-existent
 * by the RISC-V Privileged Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4
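
/*
 * For illustration (not used directly by the code below): the priority
 * register for interrupt source N lives at byte offset
 * PRIORITY_BASE + N * PRIORITY_PER_ID from the PLIC base, so e.g.
 * source 7 sits at offset 28 (0x1c); see plic_irq_toggle().
 */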

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE			0x2000
#define ENABLE_PER_HART			0x80
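
/*
 * For illustration: the enable bit for source N in context C is bit
 * (N % 32) of the 32-bit word at ENABLE_BASE + C * ENABLE_PER_HART +
 * (N / 32) * 4, which is exactly the address plic_toggle() computes below.
 */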

/*
 * Each hart context has a set of control registers associated with it. Right
 * now there are only two: a source priority threshold over which the hart
 * will take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_PER_HART		0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04
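
/*
 * For illustration: context C's threshold register is at
 * CONTEXT_BASE + C * CONTEXT_PER_HART + CONTEXT_THRESHOLD and its
 * claim/complete register at the same base plus CONTEXT_CLAIM;
 * plic_init() stores this per-context base in handler->hart_base.
 */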

#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0
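
/*
 * Since every enabled source has priority 1 and a source only fires when
 * its priority is strictly greater than the context's threshold, a
 * threshold of PLIC_ENABLE_THRESHOLD (0) lets interrupts through, while
 * PLIC_DISABLE_THRESHOLD (7) is high enough to block every source this
 * driver configures.
 */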

struct plic_priv {
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	struct plic_priv	*priv;
};
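
/*
 * plic_parent_irq is the per-hart external-interrupt line this PLIC chains
 * from; plic_cpuhp_setup_done guards against registering the CPU hotplug
 * callbacks more than once when several PLIC instances are probed.
 */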
static int plic_parent_irq;
static bool plic_cpuhp_setup_done;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static inline void plic_toggle(struct plic_handler *handler,
			       int hwirq, int enable)
{
	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	raw_spin_lock(&handler->enable_lock);
	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
	raw_spin_unlock(&handler->enable_lock);
}

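/*
 * Enable or disable a source globally: write its priority register (1 to
 * arm, 0 to disarm, matching the hardwired-to-one scheme above), then flip
 * the per-context enable bit on each present handler in @mask.
 */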
static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (handler->present &&
		    cpumask_test_cpu(cpu, &handler->priv->lmask))
			plic_toggle(handler, d->hwirq, enable);
	}
}

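/*
 * Unmasking routes the source to a single online CPU picked from the IRQ's
 * affinity mask, rather than enabling it on every CPU that could take it.
 */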
static void plic_irq_unmask(struct irq_data *d)
{
	struct cpumask amask;
	unsigned int cpu;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	cpumask_and(&amask, &priv->lmask, cpu_online_mask);
	cpu = cpumask_any_and(irq_data_get_affinity_mask(d), &amask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	plic_irq_toggle(cpumask_of(cpu), d, 1);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	plic_irq_toggle(&priv->lmask, d, 0);
}

#ifdef CONFIG_SMP
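/*
 * Moving affinity is disable-then-enable: drop the source on every context
 * this PLIC owns, then re-arm it only on the chosen CPU (unless the IRQ is
 * currently masked).
 */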
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct cpumask amask;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	cpumask_and(&amask, &priv->lmask, mask_val);

	if (force)
		cpu = cpumask_first(&amask);
	else
		cpu = cpumask_any_and(&amask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_toggle(&priv->lmask, d, 0);
	plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif

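/*
 * Completion writes to the claim register are silently ignored by the PLIC
 * when the source is disabled for this context, so if the IRQ was masked
 * while in flight we must briefly unmask it around the completion write.
 */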
static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (irqd_irq_masked(d)) {
		plic_irq_unmask(d);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_irq_mask(d);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);

		if (unlikely(irq <= 0))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
					    hwirq);
		else
			generic_handle_irq(irq);
	}

	chained_irq_exit(chip, desc);
}

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}

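/*
 * CPU hotplug callbacks: a dying CPU just quiesces its chained parent IRQ,
 * while a CPU coming up re-enables it and opens its priority threshold.
 */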
static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		pr_warn("cpu%d: parent irq not available\n", cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static int __init plic_init(struct device_node *node,
			    struct device_node *parent)
{
	int error = 0, nr_contexts, nr_handlers = 0, i;
	u32 nr_irqs;
	struct plic_priv *priv;
	struct plic_handler *handler;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = of_iomap(node, 0);
	if (WARN_ON(!priv->regs)) {
		error = -EIO;
		goto out_free_priv;
	}

	error = -EINVAL;
	/*
	 * Don't trust nr_irqs if the "riscv,ndev" property is missing;
	 * of_property_read_u32() leaves the value untouched on failure.
	 */
	if (of_property_read_u32(node, "riscv,ndev", &nr_irqs))
		nr_irqs = 0;
	if (WARN_ON(!nr_irqs))
		goto out_iounmap;

	nr_contexts = of_irq_count(node);
	if (WARN_ON(!nr_contexts))
		goto out_iounmap;

	error = -ENOMEM;
	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
						&plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain))
		goto out_iounmap;

	for (i = 0; i < nr_contexts; i++) {
		struct of_phandle_args parent;
		irq_hw_number_t hwirq;
		int cpu, hartid;

		if (of_irq_parse_one(node, i, &parent)) {
			pr_err("failed to parse parent for context %d.\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent.args[0] != RV_IRQ_EXT)
			continue;

		hartid = riscv_of_parent_hartid(parent.np);
		if (hartid < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("Invalid cpuid for context %d\n", i);
			continue;
		}

		/* Find parent domain and register chained handler */
		if (!plic_parent_irq && irq_find_host(parent.np)) {
			plic_parent_irq = irq_of_parse_and_map(node, i);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq,
							plic_handle_irq);
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("handler already present for context %d.\n", i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base =
			priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base =
			priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
		handler->priv = priv;
done:
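		/*
		 * Mask every source for this context; sources are enabled
		 * on demand via plic_irq_unmask().
		 */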
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
			plic_toggle(handler, hwirq, 0);
		nr_handlers++;
	}

	/*
	 * We can have multiple PLIC instances, so set up the cpuhp state
	 * only when a context handler for the current/boot CPU is present.
	 */
	handler = this_cpu_ptr(&plic_handlers);
	if (handler->present && !plic_cpuhp_setup_done) {
		cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
				  "irqchip/sifive/plic:starting",
				  plic_starting_cpu, plic_dying_cpu);
		plic_cpuhp_setup_done = true;
	}

	pr_info("%pOFP: mapped %d interrupts with %d handlers for %d contexts.\n",
		node, nr_irqs, nr_handlers, nr_contexts);
	return 0;

out_iounmap:
	iounmap(priv->regs);
out_free_priv:
	kfree(priv);
	return error;
}

IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */