Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Author: Marc Zyngier <marc.zyngier@arm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #define pr_fmt(fmt)	"GICv3: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/cpu_pm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/refcount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/syscore_ops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/wakeup_reason.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <trace/hooks/gic_v3.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/irqchip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/irqchip/arm-gic-common.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/irqchip/arm-gic-v3.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/irqchip/irq-partition-percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <asm/cputype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <asm/exception.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <asm/smp_plat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <asm/virt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <trace/hooks/gic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include "irq-gic-common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #define GICD_INT_NMI_PRI	(GICD_INT_DEF_PRI & ~0x80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) struct redist_region {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 	void __iomem		*redist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 	phys_addr_t		phys_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 	bool			single_redist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) static struct gic_chip_data gic_data __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
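/*
 * As a rough illustration of the macros above: assuming GICD_TYPER_SPIS()
 * follows the usual ITLinesNumber encoding ((field + 1) * 32), a GICD_TYPER
 * with ITLinesNumber == 0x1f yields 1024 lines, which GIC_LINE_NR then
 * clamps to the architectural maximum of 1020.
 */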
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61)  * The behaviours of RPR and PMR registers differ depending on the value of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62)  * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63)  * distributor and redistributors depends on whether security is enabled in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64)  * GIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66)  * When security is enabled, non-secure priority values from the (re)distributor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67)  * are presented to the GIC CPUIF as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68)  *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70)  * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71)  * EL1 are subject to a similar operation thus matching the priorities presented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72)  * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73)  * these values are unchanged by the GIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75)  * see GICv3/GICv4 Architecture Specification (IHI0069D):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76)  * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77)  *   priorities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78)  * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79)  *   interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
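/*
 * Worked example of the rule described above, assuming the kernel's usual
 * GICD_INT_DEF_PRI of 0xa0: with GIC security enabled, a non-secure write of
 * 0xa0 to GIC_(R)DIST_PRI[irq] is presented to the CPU interface as
 * (0xa0 >> 1) | 0x80 = 0xd0.
 */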
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84)  * Global static key controlling whether an update to PMR allowing more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85)  * interrupts needs to be propagated to the redistributor (DSB SY).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86)  * And this needs to be exported for modules to be able to enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87)  * interrupts...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) EXPORT_SYMBOL(gic_pmr_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) EXPORT_SYMBOL(gic_nonsecure_priorities);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96)  * When the Non-secure world has access to group 0 interrupts (as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97)  * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98)  * return the Distributor's view of the interrupt priority.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100)  * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101)  * written by software is moved to the Non-secure range by the Distributor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103)  * If both are true (which is when gic_nonsecure_priorities gets enabled),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104)  * we need to shift down the priority programmed by software to match it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105)  * against the value returned by ICC_RPR_EL1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) #define GICD_INT_RPR_PRI(priority)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	({								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		u32 __priority = (priority);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 		if (static_branch_unlikely(&gic_nonsecure_priorities))	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 			__priority = 0x80 | (__priority >> 1);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 		__priority;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	})
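/*
 * For example, assuming GICD_INT_DEF_PRI is 0xa0, GICD_INT_NMI_PRI is
 * 0xa0 & ~0x80 = 0x20. With gic_nonsecure_priorities enabled,
 * GICD_INT_RPR_PRI(0x20) evaluates to 0x80 | (0x20 >> 1) = 0x90, the value
 * ICC_RPR_EL1 reports while an NMI is in progress (see gic_handle_irq()).
 */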
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) static refcount_t *ppi_nmi_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) static struct gic_kvm_info gic_v3_kvm_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) static DEFINE_PER_CPU(bool, has_rss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) #define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) #define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) #define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) /* Our default, arbitrary priority value. Linux only uses one anyway. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) #define DEFAULT_PMR_VALUE	0xf0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) enum gic_intid_range {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	SGI_RANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	PPI_RANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	SPI_RANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	EPPI_RANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	ESPI_RANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	LPI_RANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	__INVALID_RANGE__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	switch (hwirq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	case 0 ... 15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 		return SGI_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	case 16 ... 31:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 		return PPI_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	case 32 ... 1019:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 		return SPI_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 		return EPPI_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 		return ESPI_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	case 8192 ... GENMASK(23, 0):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 		return LPI_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 		return __INVALID_RANGE__;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) }
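/*
 * Example mappings, assuming the usual EPPI_BASE_INTID of 1056 and
 * ESPI_BASE_INTID of 4096: hwirq 7 -> SGI_RANGE, 27 -> PPI_RANGE,
 * 42 -> SPI_RANGE, 1060 -> EPPI_RANGE, 4100 -> ESPI_RANGE,
 * 8500 -> LPI_RANGE; anything else (e.g. 1020..1055) is __INVALID_RANGE__.
 */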
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) static enum gic_intid_range get_intid_range(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	return __get_intid_range(d->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) static inline unsigned int gic_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	return d->hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) static inline bool gic_irq_in_rdist(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	switch (get_intid_range(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	case SGI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	case PPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	case EPPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) static inline void __iomem *gic_dist_base(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	switch (get_intid_range(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	case SGI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	case PPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	case EPPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 		/* SGI+PPI -> SGI_base for this CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 		return gic_data_rdist_sgi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	case SPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	case ESPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 		/* SPI -> dist_base */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 		return gic_data.dist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) static void gic_do_wait_for_rwp(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	u32 count = 1000000;	/* 1s! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 		count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 		if (!count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 			pr_err_ratelimited("RWP timeout, gone fishing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) /* Wait for completion of a distributor change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) static void gic_dist_wait_for_rwp(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	gic_do_wait_for_rwp(gic_data.dist_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) /* Wait for completion of a redistributor change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) static void gic_redist_wait_for_rwp(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) #ifdef CONFIG_ARM64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) static u64 __maybe_unused gic_read_iar(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 		return gic_read_iar_cavium_thunderx();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 		return gic_read_iar_common();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) static void gic_enable_redist(bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	void __iomem *rbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	u32 count = 1000000;	/* 1s! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	rbase = gic_data_rdist_rd_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	val = readl_relaxed(rbase + GICR_WAKER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 		/* Wake up this CPU redistributor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 		val &= ~GICR_WAKER_ProcessorSleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 		val |= GICR_WAKER_ProcessorSleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	writel_relaxed(val, rbase + GICR_WAKER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	if (!enable) {		/* Check that GICR_WAKER is writeable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 		val = readl_relaxed(rbase + GICR_WAKER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 		if (!(val & GICR_WAKER_ProcessorSleep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 			return;	/* No PM support in this redistributor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	while (--count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		val = readl_relaxed(rbase + GICR_WAKER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 		pr_err_ratelimited("redistributor failed to %s...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 				   enable ? "wakeup" : "sleep");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277)  * Routines to disable, enable, EOI and route interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	switch (get_intid_range(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	case SGI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	case PPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	case SPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 		*index = d->hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 		return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	case EPPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 		 * Contrary to the ESPI range, the EPPI range is contiguous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 		 * to the PPI range in the registers, so let's adjust the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 		 * displacement accordingly. Consistency is overrated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 		*index = d->hwirq - EPPI_BASE_INTID + 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 		return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	case ESPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 		*index = d->hwirq - ESPI_BASE_INTID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 		switch (offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 		case GICD_ISENABLER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 			return GICD_ISENABLERnE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 		case GICD_ICENABLER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 			return GICD_ICENABLERnE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 		case GICD_ISPENDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 			return GICD_ISPENDRnE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 		case GICD_ICPENDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 			return GICD_ICPENDRnE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 		case GICD_ISACTIVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 			return GICD_ISACTIVERnE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 		case GICD_ICACTIVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 			return GICD_ICACTIVERnE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 		case GICD_IPRIORITYR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 			return GICD_IPRIORITYRnE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 		case GICD_ICFGR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 			return GICD_ICFGRnE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 		case GICD_IROUTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 			return GICD_IROUTERnE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	*index = d->hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) }
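/*
 * As an illustration, assuming ESPI_BASE_INTID is 4096: for an ESPI with
 * hwirq 4100, *index becomes 4 and a GICD_ISENABLER access is redirected to
 * GICD_ISENABLERnE, so gic_poke_irq() below ends up setting bit 4 of the
 * first GICD_ISENABLERnE register.
 */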
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) static int gic_peek_irq(struct irq_data *d, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	u32 index, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	offset = convert_offset_index(d, offset, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	mask = 1 << (index % 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	if (gic_irq_in_rdist(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 		base = gic_data_rdist_sgi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 		base = gic_data.dist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) }
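/*
 * Sketch of the register arithmetic used here and in gic_poke_irq(): for a
 * plain SPI with hwirq 42, index is 42, the word offset is (42 / 32) * 4 = 4
 * bytes past the requested register and the mask is 1 << (42 % 32) = BIT(10).
 */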
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) static void gic_poke_irq(struct irq_data *d, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	void (*rwp_wait)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	u32 index, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	offset = convert_offset_index(d, offset, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	mask = 1 << (index % 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	if (gic_irq_in_rdist(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 		base = gic_data_rdist_sgi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 		rwp_wait = gic_redist_wait_for_rwp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 		base = gic_data.dist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 		rwp_wait = gic_dist_wait_for_rwp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	writel_relaxed(mask, base + offset + (index / 32) * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	rwp_wait();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) static void gic_mask_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	gic_poke_irq(d, GICD_ICENABLER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) static void gic_eoimode1_mask_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	gic_mask_irq(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	 * When masking a forwarded interrupt, make sure it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	 * deactivated as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	 * This ensures that an interrupt that is getting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	 * disabled/masked will not get "stuck", because there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	 * no one to deactivate it (guest is being terminated).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	if (irqd_is_forwarded_to_vcpu(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 		gic_poke_irq(d, GICD_ICACTIVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) static void gic_unmask_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	gic_poke_irq(d, GICD_ISENABLER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) static inline bool gic_supports_nmi(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	       static_branch_likely(&supports_pseudo_nmis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) static int gic_irq_set_irqchip_state(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 				     enum irqchip_irq_state which, bool val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	switch (which) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	case IRQCHIP_STATE_PENDING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	case IRQCHIP_STATE_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	case IRQCHIP_STATE_MASKED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	gic_poke_irq(d, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) static int gic_irq_get_irqchip_state(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 				     enum irqchip_irq_state which, bool *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	switch (which) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	case IRQCHIP_STATE_PENDING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		*val = gic_peek_irq(d, GICD_ISPENDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	case IRQCHIP_STATE_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 		*val = gic_peek_irq(d, GICD_ISACTIVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	case IRQCHIP_STATE_MASKED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		*val = !gic_peek_irq(d, GICD_ISENABLER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) static void gic_irq_set_prio(struct irq_data *d, u8 prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	void __iomem *base = gic_dist_base(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	u32 offset, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	writeb_relaxed(prio, base + offset + index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) static u32 gic_get_ppi_index(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	switch (get_intid_range(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	case PPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		return d->hwirq - 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	case EPPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		return d->hwirq - EPPI_BASE_INTID + 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		unreachable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) }
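/*
 * For instance, PPI hwirq 27 maps to index 11 and, assuming EPPI_BASE_INTID
 * is 1056, the first EPPI maps to index 16, so ppi_nmi_refs[] is indexed
 * contiguously across the 16 PPIs and up to 64 EPPIs.
 */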
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) static int gic_irq_nmi_setup(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	struct irq_desc *desc = irq_to_desc(d->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	if (!gic_supports_nmi())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	if (gic_peek_irq(d, GICD_ISENABLER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	 * A secondary irq_chip should be in charge of LPI requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	 * it should not be possible to get here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	if (WARN_ON(gic_irq(d) >= 8192))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	/* desc lock should already be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	if (gic_irq_in_rdist(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		u32 idx = gic_get_ppi_index(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		/* Setting up PPI as NMI, only switch handler for first NMI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 			refcount_set(&ppi_nmi_refs[idx], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 		desc->handle_irq = handle_fasteoi_nmi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	gic_irq_set_prio(d, GICD_INT_NMI_PRI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) static void gic_irq_nmi_teardown(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	struct irq_desc *desc = irq_to_desc(d->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	if (WARN_ON(!gic_supports_nmi()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	if (gic_peek_irq(d, GICD_ISENABLER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	 * A secondary irq_chip should be in charge of LPI requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	 * it should not be possible to get here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	if (WARN_ON(gic_irq(d) >= 8192))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	/* desc lock should already be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	if (gic_irq_in_rdist(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		u32 idx = gic_get_ppi_index(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		/* Tearing down NMI, only switch handler for last NMI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 			desc->handle_irq = handle_percpu_devid_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		desc->handle_irq = handle_fasteoi_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) static void gic_eoi_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	gic_write_eoir(gic_irq(d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) static void gic_eoimode1_eoi_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	 * No need to deactivate an LPI, or an interrupt that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	 * is getting forwarded to a vcpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	gic_write_dir(gic_irq(d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) static int gic_set_type(struct irq_data *d, unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	enum gic_intid_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	unsigned int irq = gic_irq(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	void (*rwp_wait)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	u32 offset, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	range = get_intid_range(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	/* Interrupt configuration for SGIs can't be changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	if (range == SGI_RANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	/* SPIs have restrictions on the supported types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	if (gic_irq_in_rdist(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		base = gic_data_rdist_sgi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		rwp_wait = gic_redist_wait_for_rwp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		base = gic_data.dist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		rwp_wait = gic_dist_wait_for_rwp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	offset = convert_offset_index(d, GICD_ICFGR, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	ret = gic_configure_irq(index, type, base + offset, rwp_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		/* Misconfigured PPIs are usually not fatal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	if (get_intid_range(d) == SGI_RANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	if (vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		irqd_set_forwarded_to_vcpu(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		irqd_clr_forwarded_to_vcpu(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) static u64 gic_mpidr_to_affinity(unsigned long mpidr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	u64 aff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	return aff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) }
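/*
 * Example: for a CPU whose MPIDR has Aff2 = 0, Aff1 = 1 and Aff0 = 2, the
 * resulting value is 0x0000000000000102, i.e. Aff3 in bits [39:32], Aff2 in
 * [23:16], Aff1 in [15:8] and Aff0 in [7:0], matching the GICD_IROUTER layout.
 */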
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) static void gic_deactivate_unhandled(u32 irqnr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	if (static_branch_likely(&supports_deactivate_key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		if (irqnr < 8192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 			gic_write_dir(irqnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		gic_write_eoir(irqnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	bool irqs_enabled = interrupts_enabled(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	if (irqs_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		nmi_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	if (static_branch_likely(&supports_deactivate_key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		gic_write_eoir(irqnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	 * Leave the PSR.I bit set to prevent other NMIs from being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	 * received while handling this one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	 * PSR.I will be restored when we ERET to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	 * interrupted context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	err = handle_domain_nmi(gic_data.domain, irqnr, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		gic_deactivate_unhandled(irqnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	if (irqs_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		nmi_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) static u32 do_read_iar(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	u32 iar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		u64 pmr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		 * We were in a context with IRQs disabled. However, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		 * entry code has set PMR to a value that allows any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		 * interrupt to be acknowledged, and not just NMIs. This can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		 * lead to surprising effects if the NMI has been retired in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		 * the meantime, and there is an IRQ pending. The IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		 * would then be taken in NMI context, something that nobody
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		 * wants to debug twice.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		 * Until we sort this, drop PMR again to a level that will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		 * actually only allow NMIs before reading IAR, and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		 * restore it to what it was.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		pmr = gic_read_pmr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		gic_pmr_mask_irqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		isb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		iar = gic_read_iar();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		gic_write_pmr(pmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		iar = gic_read_iar();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	return iar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	u32 irqnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	irqnr = do_read_iar(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	/* Check for special IDs first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if ((irqnr >= 1020 && irqnr <= 1023))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	if (gic_supports_nmi() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	    unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		gic_handle_nmi(irqnr, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	if (gic_prio_masking_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		gic_pmr_mask_irqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		gic_arch_enable_irqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	if (static_branch_likely(&supports_deactivate_key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		gic_write_eoir(irqnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		isb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		WARN_ONCE(true, "Unexpected interrupt received!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		log_abnormal_wakeup_reason("unexpected HW IRQ %u", irqnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		gic_deactivate_unhandled(irqnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) static u32 gic_get_pribits(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	u32 pribits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	pribits = gic_read_ctlr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	pribits++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	return pribits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) }
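/*
 * E.g. if ICC_CTLR_EL1.PRIbits reads back as 4, this returns 5: the CPU
 * interface implements 5 bits of priority, i.e. 32 levels in steps of 0x08.
 */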
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) static bool gic_has_group0(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	u32 old_pmr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	old_pmr = gic_read_pmr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	 * Let's find out if Group0 is under control of EL3 or not by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	 * setting the highest possible, non-zero priority in PMR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	 * order for the CPU interface to set bit 7, and keep the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	 * actual priority in the non-secure range. In the process, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	 * loses the least significant bit and the actual priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	 * becomes 0x80. Reading it back returns 0, indicating that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	 * we don't have access to Group0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	gic_write_pmr(BIT(8 - gic_get_pribits()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	val = gic_read_pmr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	gic_write_pmr(old_pmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	return val != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) }
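/*
 * With 5 priority bits, for example, the value written here is
 * BIT(8 - 5) = 0x08; reading back 0x08 means the non-secure world controls
 * Group0, while reading back 0 means the write was regrouped into the secure
 * range as described above.
 */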
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) static void __init gic_dist_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	u64 affinity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	void __iomem *base = gic_data.dist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	/* Disable the distributor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	writel_relaxed(0, base + GICD_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	gic_dist_wait_for_rwp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	 * Configure SPIs as non-secure Group-1. This will only matter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	 * if the GIC only has a single security state. This will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	 * do the right thing if the kernel is running in secure mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	 * but that's not the intended use case anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	for (i = 32; i < GIC_LINE_NR; i += 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	for (i = 0; i < GIC_ESPI_NR; i += 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	for (i = 0; i < GIC_ESPI_NR; i += 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	for (i = 0; i < GIC_ESPI_NR; i += 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	for (i = 0; i < GIC_ESPI_NR; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
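	/*
	 * The divisors above follow the register strides: ICENABLERnE,
	 * ICACTIVERnE and IGROUPRnE use one bit per interrupt (i / 8 bytes),
	 * ICFGRnE uses two bits per interrupt (i / 4 bytes) and IPRIORITYRnE
	 * uses a byte per interrupt, written four at a time with
	 * GICD_INT_DEF_PRI_X4.
	 */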
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	/* Now do the common stuff, and wait for the distributor to drain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		pr_info("Enabling SGIs without active state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		val |= GICD_CTLR_nASSGIreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	/* Enable distributor with ARE, Group1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	writel_relaxed(val, base + GICD_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	 * Set all global interrupts to the boot CPU only. ARE must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	 * enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	for (i = 32; i < GIC_LINE_NR; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	for (i = 0; i < GIC_ESPI_NR; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
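/*
 * Walk every redistributor frame in every region, calling fn() on each
 * one until it returns 0. Returns 0 if a callback succeeded, -ENODEV
 * otherwise.
 */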
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	for (i = 0; i < gic_data.nr_redist_regions; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		u64 typer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		if (reg != GIC_PIDR2_ARCH_GICv3 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			pr_warn("No redistributor present @%p\n", ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 			typer = gic_read_typer(ptr + GICR_TYPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 			ret = fn(gic_data.redist_regions + i, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 			if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 			if (gic_data.redist_regions[i].single_redist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 			if (gic_data.redist_stride) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 				ptr += gic_data.redist_stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 				if (typer & GICR_TYPER_VLPIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		} while (!(typer & GICR_TYPER_LAST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	return ret ? -ENODEV : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
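/*
 * Check whether the redistributor frame at @ptr belongs to the calling
 * CPU by comparing GICR_TYPER[63:32] with the CPU's MPIDR affinity.
 * Returns 0 on a match (recording the frame), 1 to keep iterating.
 */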
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	unsigned long mpidr = cpu_logical_map(smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	u64 typer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	u32 aff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	 * Convert affinity to a 32bit value that can be matched to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	 * GICR_TYPER bits [63:32].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	typer = gic_read_typer(ptr + GICR_TYPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	if ((typer >> 32) == aff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		u64 offset = ptr - region->redist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		gic_data_rdist_rd_base() = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		gic_data_rdist()->phys_base = region->phys_base + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			smp_processor_id(), mpidr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			(int)(region - gic_data.redist_regions),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			&gic_data_rdist()->phys_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	/* Try next one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) static int gic_populate_rdist(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	/* We couldn't even deal with ourselves... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	     smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	     (unsigned long)cpu_logical_map(smp_processor_id()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
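/*
 * Per-redistributor boot-time pass: tear down any vPE state that may
 * still be programmed (GICv4.1 only), and narrow the advertised GICv4
 * features (vLPIs, RVPEID, DirectLPI, Valid+Dirty) to what every
 * redistributor supports. Also tracks the smallest implemented PPI count.
 */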
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) static int __gic_update_rdist_properties(struct redist_region *region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 					 void __iomem *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	u64 typer = gic_read_typer(ptr + GICR_TYPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	/* Boot-time cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		/* Deactivate any present vPE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		if (val & GICR_VPENDBASER_Valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 					      ptr + SZ_128K + GICR_VPENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		/* Mark the VPE table as invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		val &= ~GICR_VPROPBASER_4_1_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	/* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 					   gic_data.rdists.has_rvpeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	/* Detect nonsensical configurations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		gic_data.rdists.has_direct_lpi = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		gic_data.rdists.has_vlpis = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		gic_data.rdists.has_rvpeid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) static void gic_update_rdist_properties(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	gic_data.ppi_nr = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	gic_iterate_rdists(__gic_update_rdist_properties);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		gic_data.ppi_nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (gic_data.rdists.has_vlpis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		pr_info("GICv4 features: %s%s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			gic_data.rdists.has_rvpeid ? "RVPEID " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 			gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) /* Check whether the GIC presents a single security state view */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) static inline bool gic_dist_security_disabled(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
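/*
 * Per-CPU setup of the GIC system register interface: enable SRE,
 * program PMR and BPR, select the EOI mode, clear the active priority
 * registers, enable Group-1 and record whether this CPU supports RSS.
 */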
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) static void gic_cpu_sys_reg_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	int i, cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	u64 mpidr = cpu_logical_map(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	u64 need_rss = MPIDR_RS(mpidr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	bool group0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	u32 pribits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	 * Need to check that the SRE bit has actually been set. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	 * not, it means that SRE is disabled at EL2. We're going to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	 * die painfully, and there is nothing we can do about it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	 * Kindly inform the luser.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	if (!gic_enable_sre())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	pribits = gic_get_pribits();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	group0 = gic_has_group0();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	/* Set priority mask register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	if (!gic_prio_masking_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	} else if (gic_supports_nmi()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		 * Mismatched configuration with the boot CPU: the system is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		 * likely to die, as interrupt masking will not work properly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		 * on all CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		 * The boot CPU calls this function before enabling NMI support,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		 * and as a result we'll never see this warning in the boot path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		 * for that CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		if (static_branch_unlikely(&gic_nonsecure_priorities))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			WARN_ON(!group0 || gic_dist_security_disabled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			WARN_ON(group0 && !gic_dist_security_disabled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	 * Some firmwares hand over to the kernel with the BPR changed from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	 * its reset value (and with a value large enough to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	 * any pre-emptive interrupts from working at all). Writing a zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	 * to BPR restores its reset value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	gic_write_bpr1(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	if (static_branch_likely(&supports_deactivate_key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		/* EOI drops priority only (mode 1) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		/* EOI deactivates interrupt too (mode 0) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	/* Always whack Group0 before Group1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	if (group0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		switch (pribits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			write_gicreg(0, ICC_AP0R3_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			write_gicreg(0, ICC_AP0R2_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 			write_gicreg(0, ICC_AP0R1_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			write_gicreg(0, ICC_AP0R0_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		isb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	switch (pribits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		write_gicreg(0, ICC_AP1R3_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		write_gicreg(0, ICC_AP1R2_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		write_gicreg(0, ICC_AP1R1_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		write_gicreg(0, ICC_AP1R0_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	isb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	/* ... and let's hit the road... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	gic_write_grpen1(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	/* Keep the RSS capability status in per_cpu variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	/* Check that all the CPUs are capable of sending SGIs to other CPUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	for_each_online_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		need_rss |= MPIDR_RS(cpu_logical_map(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		if (need_rss && (!have_rss))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 				cpu, (unsigned long)mpidr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 				i, (unsigned long)cpu_logical_map(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	 * UNPREDICTABLE choice of:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	 *   - The write is ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	 *   - The RS field is treated as 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	if (need_rss && (!gic_data.has_rss))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		pr_crit_once("RSS is required but GICD doesn't support it\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static bool gicv3_nolpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static int __init gicv3_nolpi_cfg(char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	return strtobool(buf, &gicv3_nolpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
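/*
 * LPIs are only usable when the ITS driver is built in, the distributor
 * advertises them in GICD_TYPER and "irqchip.gicv3_nolpi" wasn't given
 * on the command line.
 */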
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static int gic_dist_supports_lpis(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		!!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		!gicv3_nolpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static void gic_cpu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	void __iomem *rbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	/* Register ourselves with the rest of the world */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	if (gic_populate_rdist())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	gic_enable_redist(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	     !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	     "Distributor has extended ranges, but CPU%d doesn't\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	     smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	rbase = gic_data_rdist_sgi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	/* Configure SGIs/PPIs as non-secure Group-1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	/* initialise system registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	gic_cpu_sys_reg_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) #define MPIDR_TO_SGI_RS(mpidr)	(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) #define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static int gic_starting_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	gic_cpu_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	if (gic_dist_supports_lpis())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		its_cpu_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
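/*
 * Build the 16-bit SGI target list for the CPUs in @mask that share
 * @cluster_id, starting at *base_cpu. *base_cpu is updated so the
 * caller's loop resumes at the right place for the next cluster.
 */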
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 				   unsigned long cluster_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	int next_cpu, cpu = *base_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	unsigned long mpidr = cpu_logical_map(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	u16 tlist = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	while (cpu < nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		tlist |= 1 << (mpidr & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		next_cpu = cpumask_next(cpu, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		if (next_cpu >= nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		cpu = next_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		mpidr = cpu_logical_map(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			cpu--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	*base_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	return tlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) #define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
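/*
 * Assemble the ICC_SGI1R_EL1 value from the target cluster affinity,
 * range selector, SGI number and target list, then fire the SGI.
 */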
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	       MPIDR_TO_SGI_RS(cluster_id)		|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	gic_write_sgi1r(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (WARN_ON(d->hwirq >= 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	 * Ensure that stores to Normal memory are visible to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	 * other CPUs before issuing the IPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	for_each_cpu(cpu, mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		u16 tlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		gic_send_sgi(cluster_id, tlist, d->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	isb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
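/*
 * Register the CPU hotplug callback and allocate the 8 SGIs used as
 * IPIs by the architecture code.
 */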
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static void __init gic_smp_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct irq_fwspec sgi_fwspec = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		.fwnode		= gic_data.fwnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		.param_count	= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	int base_sgi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 				  "irqchip/arm/gicv3:starting",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 				  gic_starting_cpu, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	/* Register all 8 non-secure SGIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 					   NUMA_NO_NODE, &sgi_fwspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 					   false, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	if (WARN_ON(base_sgi <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	set_smp_ipi_range(base_sgi, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
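/*
 * Route an SPI/ESPI to a single CPU: pick a target from the mask, mask
 * the interrupt if it was enabled, rewrite GICD_IROUTER(nE), then
 * unmask it again (or wait for RWP if it was disabled).
 */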
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 			    bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	u32 offset, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	void __iomem *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	int enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if (force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		cpu = cpumask_first(mask_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		cpu = cpumask_any_and(mask_val, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	if (cpu >= nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	if (gic_irq_in_rdist(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	/* If interrupt was enabled, disable it first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	enabled = gic_peek_irq(d, GICD_ISENABLER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	if (enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		gic_mask_irq(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	offset = convert_offset_index(d, GICD_IROUTER, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	reg = gic_dist_base(d) + offset + (index * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	trace_android_rvh_gic_v3_set_affinity(d, mask_val, &val, force, gic_dist_base(d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	gic_write_irouter(val, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	 * If the interrupt was enabled, enable it again. Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	 * just wait for the distributor to have digested our changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	if (enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		gic_unmask_irq(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		gic_dist_wait_for_rwp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	return IRQ_SET_MASK_OK_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) #define gic_set_affinity	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) #define gic_ipi_send_mask	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) #define gic_smp_init()		do { } while(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static int gic_retrigger(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) #ifdef CONFIG_CPU_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static int gic_cpu_pm_notifier(struct notifier_block *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			       unsigned long cmd, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	if (cmd == CPU_PM_EXIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		if (gic_dist_security_disabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 			gic_enable_redist(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		gic_cpu_sys_reg_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		gic_write_grpen1(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		gic_enable_redist(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static struct notifier_block gic_cpu_pm_notifier_block = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	.notifier_call = gic_cpu_pm_notifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static void gic_cpu_pm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) static inline void gic_cpu_pm_init(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) #endif /* CONFIG_CPU_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) void gic_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	trace_android_vh_gic_resume(&gic_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) EXPORT_SYMBOL_GPL(gic_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static struct syscore_ops gic_syscore_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	.resume = gic_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) static void gic_syscore_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	register_syscore_ops(&gic_syscore_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static inline void gic_syscore_init(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) void gic_resume(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static struct irq_chip gic_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	.name			= "GICv3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	.irq_mask		= gic_mask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	.irq_unmask		= gic_unmask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	.irq_eoi		= gic_eoi_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	.irq_set_type		= gic_set_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	.irq_set_affinity	= gic_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	.irq_retrigger          = gic_retrigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	.irq_nmi_setup		= gic_irq_nmi_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	.irq_nmi_teardown	= gic_irq_nmi_teardown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	.ipi_send_mask		= gic_ipi_send_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	.flags			= IRQCHIP_SET_TYPE_MASKED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 				  IRQCHIP_SKIP_SET_WAKE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 				  IRQCHIP_MASK_ON_SUSPEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) static struct irq_chip gic_eoimode1_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	.name			= "GICv3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	.irq_mask		= gic_eoimode1_mask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	.irq_unmask		= gic_unmask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	.irq_eoi		= gic_eoimode1_eoi_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	.irq_set_type		= gic_set_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	.irq_set_affinity	= gic_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	.irq_retrigger          = gic_retrigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	.irq_nmi_setup		= gic_irq_nmi_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	.irq_nmi_teardown	= gic_irq_nmi_teardown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	.ipi_send_mask		= gic_ipi_send_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	.flags			= IRQCHIP_SET_TYPE_MASKED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 				  IRQCHIP_SKIP_SET_WAKE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 				  IRQCHIP_MASK_ON_SUSPEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
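/*
 * Bind a newly mapped interrupt to the flow handler matching its INTID
 * range (SGI, PPI/EPPI, SPI/ESPI or LPI), marking per-CPU interrupts
 * accordingly.
 */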
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			      irq_hw_number_t hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	struct irq_chip *chip = &gic_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	if (static_branch_likely(&supports_deactivate_key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		chip = &gic_eoimode1_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	switch (__get_intid_range(hw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	case SGI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		irq_set_percpu_devid(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 				    handle_percpu_devid_fasteoi_ipi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 				    NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	case PPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	case EPPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		irq_set_percpu_devid(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 				    handle_percpu_devid_irq, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	case SPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	case ESPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 				    handle_fasteoi_irq, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		irq_set_probe(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		irqd_set_single_target(irqd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	case LPI_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		if (!gic_dist_supports_lpis())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 				    handle_fasteoi_irq, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	irqd_set_handle_enforce_irqctx(irqd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
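/*
 * Decode a firmware interrupt specifier (1-cell SGI, 3-cell DT
 * SPI/PPI/ESPI/EPPI/LPI/partition, or 2-cell fwnode) into a hardware
 * interrupt number and trigger type.
 */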
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static int gic_irq_domain_translate(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 				    struct irq_fwspec *fwspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 				    unsigned long *hwirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 				    unsigned int *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		*hwirq = fwspec->param[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		*type = IRQ_TYPE_EDGE_RISING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if (is_of_node(fwspec->fwnode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		if (fwspec->param_count < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		switch (fwspec->param[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		case 0:			/* SPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 			*hwirq = fwspec->param[1] + 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		case 1:			/* PPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 			*hwirq = fwspec->param[1] + 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		case 2:			/* ESPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 			*hwirq = fwspec->param[1] + ESPI_BASE_INTID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		case 3:			/* EPPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 			*hwirq = fwspec->param[1] + EPPI_BASE_INTID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		case GIC_IRQ_TYPE_LPI:	/* LPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 			*hwirq = fwspec->param[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		case GIC_IRQ_TYPE_PARTITION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			*hwirq = fwspec->param[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			if (fwspec->param[1] >= 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 				*hwirq += EPPI_BASE_INTID - 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 				*hwirq += 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		 * Make it clear that broken DTs are... broken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		 * Partitioned PPIs are an unfortunate exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		WARN_ON(*type == IRQ_TYPE_NONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 			fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	if (is_fwnode_irqchip(fwspec->fwnode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		if (fwspec->param_count != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		*hwirq = fwspec->param[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		*type = fwspec->param[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		WARN_ON(*type == IRQ_TYPE_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 				unsigned int nr_irqs, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	irq_hw_number_t hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	unsigned int type = IRQ_TYPE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	struct irq_fwspec *fwspec = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	for (i = 0; i < nr_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 				unsigned int nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	for (i = 0; i < nr_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		irq_set_handler(virq + i, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		irq_domain_reset_irq_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) static int gic_irq_domain_select(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 				 struct irq_fwspec *fwspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 				 enum irq_domain_bus_token bus_token)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	/* Not for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	if (fwspec->fwnode != d->fwnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	/* If this is not DT, then we have a single domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	if (!is_of_node(fwspec->fwnode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	 * If this is a PPI and we have a 4th (non-null) parameter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	 * then we need to match the partition domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	if (fwspec->param_count >= 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	    fwspec->param[0] == 1 && fwspec->param[3] != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	    gic_data.ppi_descs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	return d == gic_data.domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) static const struct irq_domain_ops gic_irq_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	.translate = gic_irq_domain_translate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	.alloc = gic_irq_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	.free = gic_irq_domain_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	.select = gic_irq_domain_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
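/*
 * Translate a partitioned-PPI specifier: the phandle in the 4th cell is
 * looked up in the corresponding PPI partition descriptor to obtain the
 * hwirq within the partition domain.
 */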
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static int partition_domain_translate(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 				      struct irq_fwspec *fwspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 				      unsigned long *hwirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 				      unsigned int *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (!gic_data.ppi_descs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	np = of_find_node_by_phandle(fwspec->param[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	if (WARN_ON(!np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 				     of_node_to_fwnode(np));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	*hwirq = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) static const struct irq_domain_ops partition_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	.translate = partition_domain_translate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	.select = gic_irq_domain_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
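/*
 * Quirk handlers: each one records a workaround flag in gic_chip_data or
 * fixes up the cached GICD_TYPER. They are matched against gic_quirks[]
 * below, either by DT compatible string or by GICD_IIDR value.
 */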
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) static bool gic_enable_quirk_msm8996(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	struct gic_chip_data *d = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) static bool gic_enable_quirk_cavium_38539(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	struct gic_chip_data *d = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) static bool gic_enable_quirk_hip06_07(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	struct gic_chip_data *d = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	 * not being an actual ARM implementation). The saving grace is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	 * HIP07 doesn't even have a proper IIDR, and still pretends to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	 * have ESPI. In both cases, put them right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		/* Zero both ESPI and the RES0 field next to it... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		d->rdists.gicd_typer &= ~GENMASK(9, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) static const struct gic_quirk gic_quirks[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		.desc	= "GICv3: Qualcomm MSM8996 broken firmware",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		.compatible = "qcom,msm8996-gic-v3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		.init	= gic_enable_quirk_msm8996,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		.desc	= "GICv3: HIP06 erratum 161010803",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		.iidr	= 0x0204043b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		.mask	= 0xffffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		.init	= gic_enable_quirk_hip06_07,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		.desc	= "GICv3: HIP07 erratum 161010803",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		.iidr	= 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		.mask	= 0xffffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		.init	= gic_enable_quirk_hip06_07,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		 * Reserved register accesses generate a Synchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		 * External Abort. This erratum applies to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		 * - ThunderX: CN88xx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		 * - OCTEON TX: CN83xx, CN81xx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		.desc	= "GICv3: Cavium erratum 38539",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		.iidr	= 0xa000034c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		.mask	= 0xe8f00fff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		.init	= gic_enable_quirk_cavium_38539,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
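/*
 * Pseudo-NMI setup: only effective when interrupt priority masking is in
 * use. Allocates the per-PPI NMI refcounts and forces ICC_PMR_EL1
 * synchronisation when ICC_CTLR_EL1.PMHE is set.
 */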
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) static void gic_enable_nmi_support(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	if (!gic_prio_masking_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	if (!ppi_nmi_refs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	for (i = 0; i < gic_data.ppi_nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		refcount_set(&ppi_nmi_refs[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	 * Linux itself doesn't use 1:N distribution, so has no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	 * set PMHE. The only reason to have it set is if EL3 requires it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	 * (and we can't change it).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		static_branch_enable(&gic_pmr_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	 * How priority values are used by the GIC depends on two things:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	 * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	 * and whether Group 0 interrupts can be delivered to Linux in the non-secure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	 * ICC_PMR_EL1 register and the priority that software assigns to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	 * interrupts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	 * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	 * -----------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	 *      1       |      -      |  unchanged  |    unchanged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	 * -----------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	 *      0       |      1      |  non-secure |    non-secure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	 * -----------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	 *      0       |      0      |  unchanged  |    non-secure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	 * where non-secure means that the value is right-shifted by one and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	 * MSB bit set, to make it fit in the non-secure priority range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	 * are both either modified or unchanged, we can use the same set of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	 * priorities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	 * In the last case, where only the interrupt priorities are modified to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	 * be in the non-secure range, we use a different PMR value to mask IRQs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	 * and the rest of the values that we use remain unchanged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	if (gic_has_group0() && !gic_dist_security_disabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		static_branch_enable(&gic_nonsecure_priorities);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	static_branch_enable(&supports_pseudo_nmis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	if (static_branch_likely(&supports_deactivate_key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
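/*
 * Common initialisation path shared by the DT (gic_of_init) and ACPI
 * (gic_acpi_init) probes: discover distributor features, create the IRQ
 * domain and bring up the boot CPU's interface.
 */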
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) static int __init gic_init_bases(void __iomem *dist_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 				 struct redist_region *rdist_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 				 u32 nr_redist_regions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 				 u64 redist_stride,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 				 struct fwnode_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	u32 typer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	if (!is_hyp_mode_available())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		static_branch_disable(&supports_deactivate_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	if (static_branch_likely(&supports_deactivate_key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		pr_info("GIC: Using split EOI/Deactivate mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	gic_data.fwnode = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	gic_data.dist_base = dist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	gic_data.redist_regions = rdist_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	gic_data.nr_redist_regions = nr_redist_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	gic_data.redist_stride = redist_stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	 * Find out how many interrupts are supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	gic_data.rdists.gicd_typer = typer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 			  gic_quirks, &gic_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	 * architecture spec (which says that reserved registers are RES0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 						 &gic_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	gic_data.rdists.has_rvpeid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	gic_data.rdists.has_vlpis = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	gic_data.rdists.has_direct_lpi = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	gic_data.rdists.has_vpend_valid_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	pr_info("Distributor has %sRange Selector support\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		gic_data.has_rss ? "" : "no ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	if (typer & GICD_TYPER_MBIS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		err = mbi_init(handle, gic_data.domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 			pr_err("Failed to initialize MBIs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	set_handle_irq(gic_handle_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	gic_update_rdist_properties();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	gic_dist_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	gic_cpu_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	gic_smp_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	gic_cpu_pm_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	gic_syscore_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	if (gic_dist_supports_lpis()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		its_init(handle, &gic_data.rdists, gic_data.domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		its_cpu_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 			gicv2m_init(handle, gic_data.domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	gic_enable_nmi_support();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	if (gic_data.domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		irq_domain_remove(gic_data.domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	free_percpu(gic_data.rdists.rdist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
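/* Check GICD_PIDR2 to confirm the distributor implements GICv3 or GICv4. */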
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) static int __init gic_validate_dist_version(void __iomem *dist_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) /* Create all possible partitions at boot time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	struct device_node *parts_node, *child_part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	int part_idx = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	int nr_parts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	struct partition_affinity *parts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	if (!parts_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	if (!gic_data.ppi_descs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	nr_parts = of_get_child_count(parts_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	if (!nr_parts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		goto out_put_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	if (WARN_ON(!parts))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		goto out_put_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	for_each_child_of_node(parts_node, child_part) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		struct partition_affinity *part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		part = &parts[part_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		part->partition_id = of_node_to_fwnode(child_part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		pr_info("GIC: PPI partition %pOFn[%d] { ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 			child_part, part_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		n = of_property_count_elems_of_size(child_part, "affinity",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 						    sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		WARN_ON(n <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 		for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 			int err, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			u32 cpu_phandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			struct device_node *cpu_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 			err = of_property_read_u32_index(child_part, "affinity",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 							 i, &cpu_phandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 			if (WARN_ON(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 			cpu_node = of_find_node_by_phandle(cpu_phandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 			if (WARN_ON(!cpu_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			cpu = of_cpu_node_to_id(cpu_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 			if (WARN_ON(cpu < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 			pr_cont("%pOF[%d] ", cpu_node, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 			cpumask_set_cpu(cpu, &part->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		pr_cont("}\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		part_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	for (i = 0; i < gic_data.ppi_nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		unsigned int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		struct partition_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		struct irq_fwspec ppi_fwspec = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 			.fwnode		= gic_data.fwnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 			.param_count	= 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 			.param		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 				[0]	= GIC_IRQ_TYPE_PARTITION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 				[1]	= i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 				[2]	= IRQ_TYPE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		irq = irq_create_fwspec_mapping(&ppi_fwspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		if (WARN_ON(!irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 					     irq, &partition_domain_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		if (WARN_ON(!desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		gic_data.ppi_descs[i] = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) out_put_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	of_node_put(parts_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
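/*
 * Gather the maintenance interrupt and the GICV region from DT so KVM can
 * drive the virtual GIC CPU interface.
 */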
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) static void __init gic_of_setup_kvm_info(struct device_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	struct resource r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	u32 gicv_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	gic_v3_kvm_info.type = GIC_V3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	if (!gic_v3_kvm_info.maint_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	if (of_property_read_u32(node, "#redistributor-regions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 				 &gicv_idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		gicv_idx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	ret = of_address_to_resource(node, gicv_idx, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 		gic_v3_kvm_info.vcpu = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	gic_set_kvm_info(&gic_v3_kvm_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) static int __init gic_of_init(struct device_node *node, struct device_node *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	void __iomem *dist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	struct redist_region *rdist_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	u64 redist_stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	u32 nr_redist_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	dist_base = of_iomap(node, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	if (!dist_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		pr_err("%pOF: unable to map gic dist registers\n", node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	err = gic_validate_dist_version(dist_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		pr_err("%pOF: no distributor detected, giving up\n", node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		goto out_unmap_dist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		nr_redist_regions = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 			     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	if (!rdist_regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		goto out_unmap_dist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	for (i = 0; i < nr_redist_regions; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		ret = of_address_to_resource(node, 1 + i, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		if (ret || !rdist_regs[i].redist_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 			pr_err("%pOF: couldn't map region %d\n", node, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 			err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 			goto out_unmap_rdist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		rdist_regs[i].phys_base = res.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		redist_stride = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	gic_enable_of_quirks(node, gic_quirks, &gic_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 			     redist_stride, &node->fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		goto out_unmap_rdist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	gic_populate_ppi_partitions(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	if (static_branch_likely(&supports_deactivate_key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		gic_of_setup_kvm_info(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) out_unmap_rdist:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	for (i = 0; i < nr_redist_regions; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		if (rdist_regs[i].redist_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 			iounmap(rdist_regs[i].redist_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	kfree(rdist_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) out_unmap_dist:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	iounmap(dist_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) static struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	void __iomem *dist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	struct redist_region *redist_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	u32 nr_redist_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	bool single_redist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	int enabled_rdists;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	u32 maint_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	int maint_irq_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	phys_addr_t vcpu_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) } acpi_data __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) static void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	static int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	acpi_data.redist_regs[count].phys_base = phys_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	acpi_data.redist_regs[count].redist_base = redist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
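/*
 * MADT redistributor discovery: GICR subtables describe whole redistributor
 * regions, while GICC subtables provide a per-CPU GICR base when the
 * "single redistributor" layout is in use.
 */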
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 			   const unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	struct acpi_madt_generic_redistributor *redist =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			(struct acpi_madt_generic_redistributor *)header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	void __iomem *redist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	redist_base = ioremap(redist->base_address, redist->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	if (!redist_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	gic_acpi_register_redist(redist->base_address, redist_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 			 const unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	struct acpi_madt_generic_interrupt *gicc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 				(struct acpi_madt_generic_interrupt *)header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	void __iomem *redist_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	/* A GICC entry without ACPI_MADT_ENABLED is unusable, so skip it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	if (!(gicc->flags & ACPI_MADT_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	redist_base = ioremap(gicc->gicr_base_address, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	if (!redist_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) static int __init gic_acpi_collect_gicr_base(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	acpi_tbl_entry_handler redist_parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	enum acpi_madt_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	if (acpi_data.single_redist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		redist_parser = gic_acpi_parse_madt_gicc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		redist_parser = gic_acpi_parse_madt_redist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	/* Collect redistributor base addresses in GICR entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	pr_info("No valid GICR entries exist\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 				  const unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	/* Subtable presence means that redist exists, that's it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 				      const unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	struct acpi_madt_generic_interrupt *gicc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 				(struct acpi_madt_generic_interrupt *)header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	 * If the GICC entry is enabled and has a valid GICR base address, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	 * GICR base is provided via GICC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		acpi_data.enabled_rdists++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	 * It is perfectly valid for firmware to pass a disabled GICC entry; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	 * driver should not treat it as an error, so skip it instead of failing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	if (!(gicc->flags & ACPI_MADT_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) static int __init gic_acpi_count_gicr_regions(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	 * Count how many redistributor regions we have. Mixing redistributor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	 * descriptions is not allowed: GICR and GICC subtables have to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	 * mutually exclusive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 				      gic_acpi_match_gicr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	if (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 		acpi_data.single_redist = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 				      gic_acpi_match_gicc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	if (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		acpi_data.single_redist = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		count = acpi_data.enabled_rdists;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
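/*
 * Probe-time filter: accept the MADT only if the distributor version matches
 * the probe entry and at least one GICR region can be counted.
 */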
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 					   struct acpi_probe_entry *ape)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	struct acpi_madt_generic_distributor *dist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	dist = (struct acpi_madt_generic_distributor *)header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	if (dist->version != ape->driver_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	/* We need to do that exercise anyway, the sooner the better */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	count = gic_acpi_count_gicr_regions();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	if (count <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	acpi_data.nr_redist_regions = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 
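/*
 * Collect the virtual GIC details (maintenance IRQ, GICV base) from the
 * first enabled GICC entry and verify that the remaining CPUs agree.
 */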
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 						const unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	struct acpi_madt_generic_interrupt *gicc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		(struct acpi_madt_generic_interrupt *)header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	int maint_irq_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	static bool first_madt = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	/* Skip unusable CPUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	if (!(gicc->flags & ACPI_MADT_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	if (first_madt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		first_madt = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		acpi_data.maint_irq = gicc->vgic_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		acpi_data.maint_irq_mode = maint_irq_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		acpi_data.vcpu_base = gicc->gicv_base_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	 * The maintenance interrupt and GICV should be the same for every CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	    (acpi_data.vcpu_base != gicc->gicv_base_address))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) static bool __init gic_acpi_collect_virt_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 				      gic_acpi_parse_virt_madt_gicc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	return (count > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) #define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) #define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) #define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
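/*
 * ACPI counterpart of gic_of_setup_kvm_info(): register the maintenance GSI
 * and the GICV region with KVM.
 */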
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) static void __init gic_acpi_setup_kvm_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	if (!gic_acpi_collect_virt_info()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		pr_warn("Unable to get hardware information used for virtualization\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	gic_v3_kvm_info.type = GIC_V3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 				acpi_data.maint_irq_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 				ACPI_ACTIVE_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	if (irq <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	gic_v3_kvm_info.maint_irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	if (acpi_data.vcpu_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 		struct resource *vcpu = &gic_v3_kvm_info.vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		vcpu->flags = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		vcpu->start = acpi_data.vcpu_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	gic_set_kvm_info(&gic_v3_kvm_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 
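/*
 * Main ACPI probe: map the distributor from the MADT entry, collect the
 * redistributor bases, then hand over to gic_init_bases().
 */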
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	struct acpi_madt_generic_distributor *dist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	struct fwnode_handle *domain_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	/* Get distributor base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	dist = (struct acpi_madt_generic_distributor *)header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	acpi_data.dist_base = ioremap(dist->base_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 				      ACPI_GICV3_DIST_MEM_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	if (!acpi_data.dist_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		pr_err("Unable to map GICD registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	err = gic_validate_dist_version(acpi_data.dist_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		pr_err("No distributor detected at @%p, giving up\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		       acpi_data.dist_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		goto out_dist_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	if (!acpi_data.redist_regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		goto out_dist_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	err = gic_acpi_collect_gicr_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		goto out_redist_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	if (!domain_handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		goto out_redist_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 			     acpi_data.nr_redist_regions, 0, domain_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		goto out_fwhandle_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	if (static_branch_likely(&supports_deactivate_key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		gic_acpi_setup_kvm_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) out_fwhandle_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	irq_domain_free_fwnode(domain_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) out_redist_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	for (i = 0; i < acpi_data.nr_redist_regions; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		if (acpi_data.redist_regs[i].redist_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 			iounmap(acpi_data.redist_regs[i].redist_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	kfree(acpi_data.redist_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) out_dist_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	iounmap(acpi_data.dist_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		     gic_acpi_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		     gic_acpi_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		     gic_acpi_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) #endif