// SPDX-License-Identifier: GPL-2.0+
/*
 * RDA8810PL SoC irqchip driver
 *
 * Copyright RDA Microelectronics Company Limited
 * Copyright (c) 2017 Andreas Färber
 * Copyright (c) 2018 Manivannan Sadhasivam
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>

#include <asm/exception.h>

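/*
 * Register offsets within the interrupt controller block. FINALSTATUS
 * holds the currently pending sources (read by the low-level handler
 * below); writing a source's bit to MASK_SET unmasks it, writing it to
 * MASK_CLR masks it.
 */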
#define RDA_INTC_FINALSTATUS    0x00
#define RDA_INTC_MASK_SET       0x08
#define RDA_INTC_MASK_CLR       0x0c

#define RDA_IRQ_MASK_ALL        0xFFFFFFFF

#define RDA_NR_IRQS             32

static void __iomem *rda_intc_base;
static struct irq_domain *rda_irq_domain;

static void rda_intc_mask_irq(struct irq_data *d)
{
        writel_relaxed(BIT(d->hwirq), rda_intc_base + RDA_INTC_MASK_CLR);
}

static void rda_intc_unmask_irq(struct irq_data *d)
{
        writel_relaxed(BIT(d->hwirq), rda_intc_base + RDA_INTC_MASK_SET);
}

static int rda_intc_set_type(struct irq_data *data, unsigned int flow_type)
{
        /* Hardware supports only level triggered interrupts */
        if ((flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) == flow_type)
                return 0;

        return -EINVAL;
}

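/*
 * Low-level entry point: read the pending status and dispatch every set
 * bit through the IRQ domain, highest hwirq first.
 */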
static void __exception_irq_entry rda_handle_irq(struct pt_regs *regs)
{
        u32 stat = readl_relaxed(rda_intc_base + RDA_INTC_FINALSTATUS);
        u32 hwirq;

        while (stat) {
                hwirq = __fls(stat);
                handle_domain_irq(rda_irq_domain, hwirq, regs);
                stat &= ~BIT(hwirq);
        }
}

static struct irq_chip rda_irq_chip = {
        .name           = "rda-intc",
        .irq_mask       = rda_intc_mask_irq,
        .irq_unmask     = rda_intc_unmask_irq,
        .irq_set_type   = rda_intc_set_type,
};

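/*
 * All sources are level triggered, so mark each virq as IRQ_LEVEL and
 * use the level flow handler.
 */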
static int rda_irq_map(struct irq_domain *d,
                       unsigned int virq, irq_hw_number_t hw)
{
        irq_set_status_flags(virq, IRQ_LEVEL);
        irq_set_chip_and_handler(virq, &rda_irq_chip, handle_level_irq);
        irq_set_chip_data(virq, d->host_data);
        irq_set_probe(virq);

        return 0;
}

static const struct irq_domain_ops rda_irq_domain_ops = {
        .map = rda_irq_map,
        .xlate = irq_domain_xlate_onecell,
};

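/*
 * Early init: map the register block, mask all sources, register a
 * 32-entry linear IRQ domain and install the low-level handler.
 */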
static int __init rda8810_intc_init(struct device_node *node,
                                    struct device_node *parent)
{
        rda_intc_base = of_io_request_and_map(node, 0, "rda-intc");
        if (IS_ERR(rda_intc_base))
                return PTR_ERR(rda_intc_base);

        /* Mask all interrupt sources */
        writel_relaxed(RDA_IRQ_MASK_ALL, rda_intc_base + RDA_INTC_MASK_CLR);

        rda_irq_domain = irq_domain_create_linear(&node->fwnode, RDA_NR_IRQS,
                                                  &rda_irq_domain_ops,
                                                  rda_intc_base);
        if (!rda_irq_domain) {
                iounmap(rda_intc_base);
                return -ENOMEM;
        }

        set_handle_irq(rda_handle_irq);

        return 0;
}

IRQCHIP_DECLARE(rda_intc, "rda,8810pl-intc", rda8810_intc_init);
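
/*
 * Example consumer node for this binding (illustrative sketch only; the
 * unit address and reg values are placeholders, not taken from this
 * driver). A single interrupt cell is used, matching
 * irq_domain_xlate_onecell above:
 *
 *	intc: interrupt-controller@0 {
 *		compatible = "rda,8810pl-intc";
 *		reg = <0x0 0x1000>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 */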