/*
 * Marvell Orion SoCs IRQ chip driver.
 *
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>

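/*
 * This file covers the two interrupt controllers found on Orion SoCs:
 * the main controller ("marvell,orion-intc"), organized as one or more
 * 32-bit cause/mask register banks handled directly from the exception
 * entry, and the bridge controller ("marvell,orion-bridge-intc"), which
 * is chained behind its parent interrupt.
 */
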
/*
 * Orion SoC main interrupt controller
 */
#define ORION_IRQS_PER_CHIP		32

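/*
 * Per-bank register offsets. Only CAUSE and MASK are used by this
 * driver; FIQ_MASK and ENDP_MASK are listed for reference only.
 */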
#define ORION_IRQ_CAUSE			0x00
#define ORION_IRQ_MASK			0x04
#define ORION_IRQ_FIQ_MASK		0x08
#define ORION_IRQ_ENDP_MASK		0x0c

static struct irq_domain *orion_irq_domain;

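/*
 * Top-level IRQ entry for the main controller: walk every 32-bit bank,
 * AND the cause register with the cached mask to skip masked sources,
 * then dispatch each pending bit through the linear irq domain.
 */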
static void
__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
{
	struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
	int n, base = 0;

	for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);
		u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
			gc->mask_cache;
		while (stat) {
			u32 hwirq = __fls(stat);
			handle_domain_irq(orion_irq_domain,
					  gc->irq_base + hwirq, regs);
			stat &= ~(1 << hwirq);
		}
	}
}

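/*
 * Probe the main controller from DT: each "reg" entry describes one
 * 32-interrupt register bank, and every bank gets its own generic chip.
 * Note that despite its name the MASK register holds enable bits, so
 * masking clears a bit and unmasking sets it. All sources start out
 * masked and the top-level handler above is installed last.
 */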
static int __init orion_irq_init(struct device_node *np,
				 struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int n, ret, base, num_chips = 0;
	struct resource r;

	/* count number of irq chips by valid reg addresses */
	while (of_address_to_resource(np, num_chips, &r) == 0)
		num_chips++;

	orion_irq_domain = irq_domain_add_linear(np,
				num_chips * ORION_IRQS_PER_CHIP,
				&irq_generic_chip_ops, NULL);
	if (!orion_irq_domain)
		panic("%pOFn: unable to add irq domain\n", np);

	ret = irq_alloc_domain_generic_chips(orion_irq_domain,
				ORION_IRQS_PER_CHIP, 1, np->full_name,
				handle_level_irq, clr, 0,
				IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		panic("%pOFn: unable to alloc irq domain gc\n", np);

	for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);

		of_address_to_resource(np, n, &r);

		if (!request_mem_region(r.start, resource_size(&r), np->name))
			panic("%pOFn: unable to request mem region %d",
			      np, n);

		gc->reg_base = ioremap(r.start, resource_size(&r));
		if (!gc->reg_base)
			panic("%pOFn: unable to map resource %d", np, n);

		gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

		/* mask all interrupts */
		writel(0, gc->reg_base + ORION_IRQ_MASK);
	}

	set_handle_irq(orion_handle_irq);
	return 0;
}
IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);

/*
 * Orion SoC bridge interrupt controller
 */
#define ORION_BRIDGE_IRQ_CAUSE	0x00
#define ORION_BRIDGE_IRQ_MASK	0x04

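/*
 * Chained handler for the bridge controller: read the pending,
 * unmasked cause bits and hand each one to its mapped Linux interrupt.
 */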
static void orion_bridge_irq_handler(struct irq_desc *desc)
{
	struct irq_domain *d = irq_desc_get_handler_data(desc);

	struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
	u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
		   gc->mask_cache;

	while (stat) {
		u32 hwirq = __fls(stat);

		generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
		stat &= ~(1 << hwirq);
	}
}

/*
 * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
 * To avoid interrupt events on stale irqs, we clear them before unmask.
 */
static unsigned int orion_bridge_irq_startup(struct irq_data *d)
{
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	ct->chip.irq_ack(d);
	ct->chip.irq_unmask(d);
	return 0;
}

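/*
 * Probe the bridge controller: the number of interrupts defaults to 32
 * and may be overridden by the optional "marvell,#interrupts" property.
 * Bridge interrupts are edge handled and acked by clearing their CAUSE
 * bit, and the whole controller is chained behind its parent interrupt.
 */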
static int __init orion_bridge_irq_init(struct device_node *np,
					struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	int ret, irq, nrirqs = 32;

	/* get optional number of interrupts provided */
	of_property_read_u32(np, "marvell,#interrupts", &nrirqs);

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%pOFn: unable to add irq domain\n", np);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
			     handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%pOFn: unable to alloc irq domain gc\n", np);
		return ret;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%pOFn: unable to get resource\n", np);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->name)) {
		pr_err("%pOFn: unable to request mem region\n", np);
		return -ENOMEM;
	}

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%pOFn: unable to parse irq\n", np);
		return -EINVAL;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = ioremap(r.start, resource_size(&r));
	if (!gc->reg_base) {
		pr_err("%pOFn: unable to map resource\n", np);
		return -ENOMEM;
	}

	gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
	gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
	gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	/* mask and clear all interrupts */
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);

	irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
					 domain);

	return 0;
}
IRQCHIP_DECLARE(orion_bridge_intc,
		"marvell,orion-bridge-intc", orion_bridge_irq_init);