// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Freescale Semiconductor, Inc.
 */
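
/*
 * IRQ chip driver for the wake-up interrupt masking part of the GPCv2
 * (General Power Controller) found on i.MX7D and i.MX8MQ. It is stacked
 * on top of the GIC and selects which interrupts may wake the system
 * from low-power modes.
 */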

#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/syscore_ops.h>

#define IMR_NUM			4
#define GPC_MAX_IRQS		(IMR_NUM * 32)

#define GPC_IMR1_CORE0		0x30
#define GPC_IMR1_CORE1		0x40
#define GPC_IMR1_CORE2		0x1c0
#define GPC_IMR1_CORE3		0x1d0

struct gpcv2_irqchip_data {
	struct raw_spinlock	rlock;
	void __iomem		*gpc_base;
	u32			wakeup_sources[IMR_NUM];
	u32			saved_irq_mask[IMR_NUM];
	u32			cpu2wakeup;
};

static struct gpcv2_irqchip_data *imx_gpcv2_instance;

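/* Return the address of the i-th IMR register of the wake-up target CPU. */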
static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i)
{
	return cd->gpc_base + cd->cpu2wakeup + i * 4;
}

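/*
 * Syscore suspend hook: save the current IMR contents and program the
 * wake-up sources recorded by imx_gpcv2_irq_set_wake(), so that only the
 * selected interrupts can wake the system from suspend.
 */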
static int gpcv2_wakeup_source_save(void)
{
	struct gpcv2_irqchip_data *cd;
	void __iomem *reg;
	int i;

	cd = imx_gpcv2_instance;
	if (!cd)
		return 0;

	for (i = 0; i < IMR_NUM; i++) {
		reg = gpcv2_idx_to_reg(cd, i);
		cd->saved_irq_mask[i] = readl_relaxed(reg);
		writel_relaxed(cd->wakeup_sources[i], reg);
	}

	return 0;
}

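/* Syscore resume hook: restore the IMR contents saved at suspend time. */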
static void gpcv2_wakeup_source_restore(void)
{
	struct gpcv2_irqchip_data *cd;
	int i;

	cd = imx_gpcv2_instance;
	if (!cd)
		return;

	for (i = 0; i < IMR_NUM; i++)
		writel_relaxed(cd->saved_irq_mask[i], gpcv2_idx_to_reg(cd, i));
}

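/* Apply the wake-up masks for the whole system suspend/resume cycle. */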
static struct syscore_ops imx_gpcv2_syscore_ops = {
	.suspend	= gpcv2_wakeup_source_save,
	.resume		= gpcv2_wakeup_source_restore,
};

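/*
 * Record whether this interrupt may wake the system: a cleared bit in
 * wakeup_sources[] means the line stays unmasked during suspend. The
 * hardware is only updated by gpcv2_wakeup_source_save() on the suspend
 * path.
 */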
static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct gpcv2_irqchip_data *cd = d->chip_data;
	unsigned int idx = d->hwirq / 32;
	unsigned long flags;
	u32 mask, val;

	raw_spin_lock_irqsave(&cd->rlock, flags);
	mask = BIT(d->hwirq % 32);
	val = cd->wakeup_sources[idx];

	cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask);
	raw_spin_unlock_irqrestore(&cd->rlock, flags);

	/*
	 * Do *not* call into the parent, as the GIC doesn't have any
	 * wake-up facility...
	 */

	return 0;
}

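/* Clear the IMR bit (unmask in the GPC), then unmask at the parent GIC. */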
static void imx_gpcv2_irq_unmask(struct irq_data *d)
{
	struct gpcv2_irqchip_data *cd = d->chip_data;
	void __iomem *reg;
	u32 val;

	raw_spin_lock(&cd->rlock);
	reg = gpcv2_idx_to_reg(cd, d->hwirq / 32);
	val = readl_relaxed(reg);
	val &= ~BIT(d->hwirq % 32);
	writel_relaxed(val, reg);
	raw_spin_unlock(&cd->rlock);

	irq_chip_unmask_parent(d);
}

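/* Set the IMR bit (mask in the GPC), then mask at the parent GIC. */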
static void imx_gpcv2_irq_mask(struct irq_data *d)
{
	struct gpcv2_irqchip_data *cd = d->chip_data;
	void __iomem *reg;
	u32 val;

	raw_spin_lock(&cd->rlock);
	reg = gpcv2_idx_to_reg(cd, d->hwirq / 32);
	val = readl_relaxed(reg);
	val |= BIT(d->hwirq % 32);
	writel_relaxed(val, reg);
	raw_spin_unlock(&cd->rlock);

	irq_chip_mask_parent(d);
}

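/*
 * The GPC is stacked between the peripherals and the GIC: masking,
 * unmasking and wake-up selection are handled here, everything else is
 * forwarded to the parent chip.
 */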
static struct irq_chip gpcv2_irqchip_data_chip = {
	.name			= "GPCv2",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= imx_gpcv2_irq_mask,
	.irq_unmask		= imx_gpcv2_irq_unmask,
	.irq_set_wake		= imx_gpcv2_irq_set_wake,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= irq_chip_set_type_parent,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

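/*
 * Validate a three-cell devicetree interrupt specifier (SPIs only) and
 * extract the hardware interrupt number and trigger type.
 */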
static int imx_gpcv2_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}

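/*
 * Allocate irqs in the hierarchy: attach the GPCv2 chip to each new
 * virq, then forward the specifier to the parent (GIC) domain.
 */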
static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
				  unsigned int irq, unsigned int nr_irqs,
				  void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	unsigned int type;
	int err;
	int i;

	err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type);
	if (err)
		return err;

	if (hwirq >= GPC_MAX_IRQS)
		return -EINVAL;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
				&gpcv2_irqchip_data_chip, domain->host_data);
	}

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
					    &parent_fwspec);
}

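/* Hierarchical domain ops; freeing is left to the generic helper. */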
static const struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
	.translate	= imx_gpcv2_domain_translate,
	.alloc		= imx_gpcv2_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

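/* The .data value is the number of CPU cores whose IMR banks must be set up. */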
static const struct of_device_id gpcv2_of_match[] = {
	{ .compatible = "fsl,imx7d-gpc",  .data = (const void *) 2 },
	{ .compatible = "fsl,imx8mq-gpc", .data = (const void *) 4 },
	{ /* END */ }
};

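/*
 * Probe: map the GPC registers, create a hierarchical irq domain below
 * the GIC, mask all interrupts for every core and register syscore ops
 * to handle the wake-up sources across suspend/resume.
 */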
static int __init imx_gpcv2_irqchip_init(struct device_node *node,
					 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	struct gpcv2_irqchip_data *cd;
	const struct of_device_id *id;
	unsigned long core_num;
	int i;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	id = of_match_node(gpcv2_of_match, node);
	if (!id) {
		pr_err("%pOF: unknown compatibility string\n", node);
		return -ENODEV;
	}

	core_num = (unsigned long)id->data;

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to get parent domain\n", node);
		return -ENXIO;
	}

	cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
	if (!cd) {
		pr_err("%pOF: kzalloc failed!\n", node);
		return -ENOMEM;
	}

	raw_spin_lock_init(&cd->rlock);

	cd->gpc_base = of_iomap(node, 0);
	if (!cd->gpc_base) {
		pr_err("%pOF: unable to map gpc registers\n", node);
		kfree(cd);
		return -ENOMEM;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
				node, &gpcv2_irqchip_data_domain_ops, cd);
	if (!domain) {
		iounmap(cd->gpc_base);
		kfree(cd);
		return -ENOMEM;
	}
	irq_set_default_host(domain);

	/* Initially mask all interrupts */
	for (i = 0; i < IMR_NUM; i++) {
		void __iomem *reg = cd->gpc_base + i * 4;

		switch (core_num) {
		case 4:
			writel_relaxed(~0, reg + GPC_IMR1_CORE2);
			writel_relaxed(~0, reg + GPC_IMR1_CORE3);
			fallthrough;
		case 2:
			writel_relaxed(~0, reg + GPC_IMR1_CORE0);
			writel_relaxed(~0, reg + GPC_IMR1_CORE1);
		}
		cd->wakeup_sources[i] = ~0;
	}

	/* Use CORE0 as the default CPU to be woken up by the GPC */
	cd->cpu2wakeup = GPC_IMR1_CORE0;

	/*
	 * Due to a hardware design flaw, the GPR interrupt (#32) must be
	 * kept unmasked during RUN mode to avoid entering DSM by mistake.
	 */
	writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);

	imx_gpcv2_instance = cd;
	register_syscore_ops(&imx_gpcv2_syscore_ops);

	/*
	 * Clear the OF_POPULATED flag set in of_irq_init so that
	 * later the GPC power domain driver will not be skipped.
	 */
	of_node_clear_flag(node, OF_POPULATED);
	return 0;
}

IRQCHIP_DECLARE(imx_gpcv2_imx7d, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);
IRQCHIP_DECLARE(imx_gpcv2_imx8mq, "fsl,imx8mq-gpc", imx_gpcv2_irqchip_init);