/*
 * Copyright (C) 2016 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#define PIC_CAUSE			0x0
#define PIC_MASK			0x4

#define PIC_MAX_IRQS			32
#define PIC_MAX_IRQ_MASK		((1UL << PIC_MAX_IRQS) - 1)

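/*
 * struct mvebu_pic - per-instance driver state
 * @base:	MMIO base of the CAUSE/MASK register pair
 * @parent_irq:	per-CPU parent interrupt this controller is chained to
 * @domain:	linear IRQ domain covering the 32 possible inputs
 * @irq_chip:	irq_chip callbacks, named after the platform device
 */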
struct mvebu_pic {
	void __iomem *base;
	u32 parent_irq;
	struct irq_domain *domain;
	struct irq_chip irq_chip;
};

static void mvebu_pic_reset(struct mvebu_pic *pic)
{
	/* ACK and mask all interrupts */
	writel(0, pic->base + PIC_MASK);
	writel(PIC_MAX_IRQ_MASK, pic->base + PIC_CAUSE);
}

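/* ACK a single interrupt by writing its bit back to the CAUSE register */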
static void mvebu_pic_eoi_irq(struct irq_data *d)
{
	struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);

	writel(1 << d->hwirq, pic->base + PIC_CAUSE);
}

static void mvebu_pic_mask_irq(struct irq_data *d)
{
	struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
	u32 reg;

	reg = readl(pic->base + PIC_MASK);
	reg |= (1 << d->hwirq);
	writel(reg, pic->base + PIC_MASK);
}

static void mvebu_pic_unmask_irq(struct irq_data *d)
{
	struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
	u32 reg;

	reg = readl(pic->base + PIC_MASK);
	reg &= ~(1 << d->hwirq);
	writel(reg, pic->base + PIC_MASK);
}

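/*
 * Child interrupts inherit the per-CPU nature of the parent interrupt:
 * each mapping is set up as a level-type percpu_devid interrupt.
 */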
static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	struct mvebu_pic *pic = domain->host_data;

	irq_set_percpu_devid(virq);
	irq_set_chip_data(virq, pic);
	irq_set_chip_and_handler(virq, &pic->irq_chip,
				 handle_percpu_devid_irq);
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_probe(virq);

	return 0;
}

static const struct irq_domain_ops mvebu_pic_domain_ops = {
	.map = mvebu_pic_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

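/*
 * Chained handler for the parent interrupt: read the CAUSE register and
 * demultiplex every pending bit to its mapping in the child domain.
 */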
static void mvebu_pic_handle_cascade_irq(struct irq_desc *desc)
{
	struct mvebu_pic *pic = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long irqmap, irqn;
	unsigned int cascade_irq;

	irqmap = readl_relaxed(pic->base + PIC_CAUSE);
	chained_irq_enter(chip, desc);

	for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
		cascade_irq = irq_find_mapping(pic->domain, irqn);
		generic_handle_irq(cascade_irq);
	}

	chained_irq_exit(chip, desc);
}

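/*
 * Run on each CPU via on_each_cpu(): reset the PIC and enable the
 * per-CPU parent interrupt on the calling CPU.
 */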
static void mvebu_pic_enable_percpu_irq(void *data)
{
	struct mvebu_pic *pic = data;

	mvebu_pic_reset(pic);
	enable_percpu_irq(pic->parent_irq, IRQ_TYPE_NONE);
}

static void mvebu_pic_disable_percpu_irq(void *data)
{
	struct mvebu_pic *pic = data;

	disable_percpu_irq(pic->parent_irq);
}

static int mvebu_pic_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct mvebu_pic *pic;
	struct irq_chip *irq_chip;
	struct resource *res;

	pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL);
	if (!pic)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pic->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pic->base))
		return PTR_ERR(pic->base);

	irq_chip = &pic->irq_chip;
	irq_chip->name = dev_name(&pdev->dev);
	irq_chip->irq_mask = mvebu_pic_mask_irq;
	irq_chip->irq_unmask = mvebu_pic_unmask_irq;
	irq_chip->irq_eoi = mvebu_pic_eoi_irq;

	pic->parent_irq = irq_of_parse_and_map(node, 0);
	if (pic->parent_irq <= 0) {
		dev_err(&pdev->dev, "Failed to parse parent interrupt\n");
		return -EINVAL;
	}

	pic->domain = irq_domain_add_linear(node, PIC_MAX_IRQS,
					    &mvebu_pic_domain_ops, pic);
	if (!pic->domain) {
		dev_err(&pdev->dev, "Failed to allocate irq domain\n");
		return -ENOMEM;
	}

	irq_set_chained_handler(pic->parent_irq, mvebu_pic_handle_cascade_irq);
	irq_set_handler_data(pic->parent_irq, pic);

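	/* Reset the PIC and enable the parent per-CPU interrupt on every online CPU */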
	on_each_cpu(mvebu_pic_enable_percpu_irq, pic, 1);

	platform_set_drvdata(pdev, pic);

	return 0;
}

static int mvebu_pic_remove(struct platform_device *pdev)
{
	struct mvebu_pic *pic = platform_get_drvdata(pdev);

	on_each_cpu(mvebu_pic_disable_percpu_irq, pic, 1);
	irq_domain_remove(pic->domain);

	return 0;
}

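/*
 * Illustrative sketch of a matching device tree node (the register window
 * and parent interrupt below are examples, not taken from a real board; the
 * marvell,armada-8k-pic binding document is authoritative):
 *
 *	pic: interrupt-controller@3f0100 {
 *		compatible = "marvell,armada-8k-pic";
 *		reg = <0x3f0100 0x10>;
 *		#interrupt-cells = <1>;
 *		interrupt-controller;
 *		interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */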
static const struct of_device_id mvebu_pic_of_match[] = {
	{ .compatible = "marvell,armada-8k-pic", },
	{},
};
MODULE_DEVICE_TABLE(of, mvebu_pic_of_match);

static struct platform_driver mvebu_pic_driver = {
	.probe  = mvebu_pic_probe,
	.remove = mvebu_pic_remove,
	.driver = {
		.name = "mvebu-pic",
		.of_match_table = mvebu_pic_of_match,
	},
};
module_platform_driver(mvebu_pic_driver);

MODULE_AUTHOR("Yehuda Yitschak <yehuday@marvell.com>");
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mvebu_pic");