// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
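/*
 * e.g. with lpi_id_bits == 16, LPI_PROPBASE_SZ is 64kB (one config
 * byte for each of the 65536 IDs) and LPI_PENDBASE_SZ is also 64kB
 * (65536 / 8 == 8kB of pending bits, aligned up to 64kB).
 */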

#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	void __iomem		*sgir_base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			typer;
	u64			cbaser_save;
	u32			ctlr_save;
	u32			mpidr;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	int			vlpi_redist_offset;
};

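/*
 * Shorthands for the cached GITS_TYPER: VLPIS flags GICv4 support,
 * VMAPP flags the GICv4.1 command set, and DEVBITS holds the number
 * of implemented DeviceID bits minus one.
 */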
#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS						\
	({								\
		int nvpeid = 16;					\
		if (gic_rdists->has_rvpeid &&				\
		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
				      GICD_TYPER2_VID);			\
									\
		nvpeid;							\
	})
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))
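/*
 * e.g. a GICv4.1 system with GICD_TYPER2.VIL set and VID == 15 gets
 * 16 VPEID bits, i.e. ITS_MAX_VPEID == 65536; without RVPEID we keep
 * the default of 16 bits.
 */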

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
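/* e.g. with 4kB pages, order 2 yields 16kB */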

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	raw_spinlock_t		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table and a list of interrupts. If some of its LPIs
 * are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

struct cpu_lpi_count {
	atomic_t	managed;
	atomic_t	unmanaged;
};

static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
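/*
 * A redistributor is a series of 64kB frames: RD_base, SGI_base, and
 * on GICv4 two further frames for virtual LPIs, which is why the
 * VLPI frame sits at RD_base + 128kB.
 */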

/*
 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
 * always have vSGIs mapped.
 */
static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
{
	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
}

static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (require_its_list_vmovp(vm, its))
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}

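/*
 * LPIs are allocated contiguously per device, so an event ID is just
 * the offset of the interrupt from the device's LPI base.
 */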
static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
						  u32 event)
{
	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
		return NULL;

	return &its_dev->event_map.vlpi_maps[event];
}

static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);

		return dev_event_to_vlpi_map(its_dev, event);
	}

	return NULL;
}

static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
{
	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
	return vpe->col_idx;
}

static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}

static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	int cpu;

	if (map) {
		cpu = vpe_to_cpuid_lock(map->vpe, flags);
	} else {
		/* Physical LPIs are already locked via the irq_desc lock */
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
		/* Keep GCC quiet... */
		*flags = 0;
	}

	return cpu;
}

static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
{
	struct its_vlpi_map *map = get_vlpi_map(d);

	if (map)
		vpe_to_cpuid_unlock(map->vpe, flags);
}

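/*
 * Collection targets are 64kB-aligned redistributor addresses, so
 * any of bits [15:0] being set flags a corrupted entry.
 */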
static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;

		struct {
			struct its_vpe *vpe;
		} its_invdb_cmd;

		struct {
			struct its_vpe *vpe;
			u8 sgi;
			u8 priority;
			bool enable;
			bool group;
			bool clear;
		} its_vsgi_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
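/* A command is four 64-bit words (32 bytes), so the 64kB queue holds 2048 entries. */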

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

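/*
 * Write 'val' into bits [h:l] of a command word, e.g.
 * its_mask_encode(&raw, 5, 7, 0) sets bits [7:0] to 5 and leaves the
 * rest of the word untouched.
 */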
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

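/* The ITT base is 256-byte aligned; only PA bits [51:8] are encoded. */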
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
{
	its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
}

static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
{
	its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
}

static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
{
	its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
}

static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
}

static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
}

static void its_encode_db(struct its_cmd_block *cmd, bool db)
{
	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
}

static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
{
	its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
}

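/* Only the top four priority bits fit the vSGI field, hence prio >> 4. */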
static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
{
	its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
}

static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
{
	its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
}

static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
{
	its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
}

static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
{
	its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

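/*
 * MAPD: the size field holds log2(number of ITT entries) minus one,
 * and the ITT base must be ITS_ITT_ALIGN (256 bytes) aligned.
 */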
static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return desc->its_invall_cmd.col;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr, vconf_addr;
	u64 target;
	bool alloc;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);

	if (!desc->its_vmapp_cmd.valid) {
		if (is_v4_1(its)) {
			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
			its_encode_alloc(cmd, alloc);
		}

		goto out;
	}

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	if (!is_v4_1(its))
		goto out;

	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));

	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);

	its_encode_alloc(cmd, alloc);

	/* We can only signal PTZ when alloc==1. Why do we have two bits? */
	its_encode_ptz(cmd, alloc);
	its_encode_vconf_addr(cmd, vconf_addr);
	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);

out:
	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

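	/*
	 * On GICv4.0 the doorbell is a per-VPE LPI; 1023 (the spurious
	 * INTID) means "no doorbell". GICv4.1 relies on the default
	 * doorbell programmed at VMAPP time instead.
	 */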
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) db = 1023;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) its_encode_cmd(cmd, GITS_CMD_VMAPTI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) its_encode_db_phys_id(cmd, db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) its_fixup_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return valid_vpe(its, desc->its_vmapti_cmd.vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
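/*
 * Note on the 1023 fallback above: INTID 1023 is the architectural
 * "no pending interrupt" ID, so programming it as the doorbell
 * effectively disables physical doorbells for this VLPI. On GICv4.1
 * the per-event doorbell is gone altogether (hence the !is_v4_1()
 * check) and the default doorbell set at VMAPP time is used instead.
 */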
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct its_cmd_block *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct its_cmd_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) u32 db;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) db = 1023;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) its_encode_cmd(cmd, GITS_CMD_VMOVI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) its_encode_db_phys_id(cmd, db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) its_encode_db_valid(cmd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) its_fixup_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return valid_vpe(its, desc->its_vmovi_cmd.vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct its_cmd_block *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct its_cmd_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) u64 target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) its_encode_cmd(cmd, GITS_CMD_VMOVP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) its_encode_target(cmd, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (is_v4_1(its)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) its_encode_db(cmd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) its_fixup_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return valid_vpe(its, desc->its_vmovp_cmd.vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct its_cmd_block *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct its_cmd_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct its_vlpi_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) desc->its_inv_cmd.event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) its_encode_cmd(cmd, GITS_CMD_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) its_fixup_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) return valid_vpe(its, map->vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) static struct its_vpe *its_build_vint_cmd(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct its_cmd_block *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct its_cmd_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct its_vlpi_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) desc->its_int_cmd.event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) its_encode_cmd(cmd, GITS_CMD_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) its_encode_event_id(cmd, desc->its_int_cmd.event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) its_fixup_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return valid_vpe(its, map->vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) struct its_cmd_block *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct its_cmd_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct its_vlpi_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) desc->its_clear_cmd.event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) its_encode_cmd(cmd, GITS_CMD_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) its_fixup_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return valid_vpe(its, map->vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct its_cmd_block *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct its_cmd_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (WARN_ON(!is_v4_1(its)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) its_encode_cmd(cmd, GITS_CMD_INVDB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) its_fixup_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return valid_vpe(its, desc->its_invdb_cmd.vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct its_cmd_block *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct its_cmd_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (WARN_ON(!is_v4_1(its)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) its_encode_cmd(cmd, GITS_CMD_VSGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) its_fixup_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return valid_vpe(its, desc->its_vsgi_cmd.vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static u64 its_cmd_ptr_to_offset(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct its_cmd_block *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return (ptr - its->cmd_base) * sizeof(*ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
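/*
 * Illustrative inverse of the helper above (a sketch, not used by the
 * driver): the queue is a flat array of struct its_cmd_block, each
 * four 64-bit words (32 bytes), so offsets and entry pointers convert
 * by simple scaling.
 */
static __maybe_unused struct its_cmd_block *its_offset_to_cmd_ptr(struct its_node *its,
								  u64 offset)
{
	return its->cmd_base + offset / sizeof(struct its_cmd_block);
}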
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) static int its_queue_full(struct its_node *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) int widx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) int ridx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) widx = its->cmd_write - its->cmd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /* This is incredibly unlikely to happen, unless the ITS locks up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
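/*
 * Worked example for the check above, assuming (hypothetically) a
 * queue of ITS_CMD_QUEUE_NR_ENTRIES == 8 slots: with the write index
 * at 6 and GITS_CREADR reporting slot 7, (6 + 1) % 8 == 7, so the
 * queue is declared full. One slot is always left unused so that a
 * full queue can be distinguished from an empty one.
 */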
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) static struct its_cmd_block *its_allocate_entry(struct its_node *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct its_cmd_block *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) u32 count = 1000000; /* 1s! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) while (its_queue_full(its)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (!count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) pr_err_ratelimited("ITS queue not draining\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) cmd = its->cmd_write++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* Handle queue wrapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) its->cmd_write = its->cmd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /* Clear command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) cmd->raw_cmd[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) cmd->raw_cmd[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) cmd->raw_cmd[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) cmd->raw_cmd[3] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static struct its_cmd_block *its_post_commands(struct its_node *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) writel_relaxed(wr, its->base + GITS_CWRITER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) return its->cmd_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * Make sure the commands written to memory are observable by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * the ITS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) dsb(ishst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
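/*
 * Design note on the two paths above: when the command queue is
 * mapped with coherent attributes, a dsb(ishst) barrier is enough for
 * the ITS to observe the new 32-byte command; only a non-coherent
 * mapping (ITS_FLAGS_CMDQ_NEEDS_FLUSHING) requires an explicit cache
 * clean to the point of coherency.
 */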
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static int its_wait_for_range_completion(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) u64 prev_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct its_cmd_block *to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) u64 rd_idx, to_idx, linear_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) u32 count = 1000000; /* 1s! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) /* Linearize to_idx if the command queue has wrapped around */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) to_idx = its_cmd_ptr_to_offset(its, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (to_idx < prev_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) to_idx += ITS_CMD_QUEUE_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) linear_idx = prev_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) s64 delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) rd_idx = readl_relaxed(its->base + GITS_CREADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * Compute the read pointer progress, taking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * potential wrap-around into account.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) delta = rd_idx - prev_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (rd_idx < prev_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) delta += ITS_CMD_QUEUE_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) linear_idx += delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (linear_idx >= to_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (!count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) to_idx, linear_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) prev_idx = rd_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
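/*
 * Worked example of the wrap-around arithmetic above, assuming the
 * usual 64KB (ITS_CMD_QUEUE_SZ == 0x10000) queue: if prev_idx is
 * 0xffe0 and GITS_CREADR later reads 0x0040, the reader has wrapped,
 * so delta = 0x0040 - 0xffe0 + 0x10000 = 0x60 bytes, i.e. three
 * 32-byte commands consumed.
 */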
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) /* Warning, macro hell follows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) void name(struct its_node *its, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) buildtype builder, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) struct its_cmd_desc *desc) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) synctype *sync_obj; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) unsigned long flags; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) u64 rd_idx; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) raw_spin_lock_irqsave(&its->lock, flags); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) cmd = its_allocate_entry(its); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (!cmd) { /* We're soooooo screwed... */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) raw_spin_unlock_irqrestore(&its->lock, flags); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) sync_obj = builder(its, cmd, desc); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) its_flush_cmd(its, cmd); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (sync_obj) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) sync_cmd = its_allocate_entry(its); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (!sync_cmd) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) goto post; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) buildfn(its, sync_cmd, sync_obj); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) its_flush_cmd(its, sync_cmd); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) post: \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) rd_idx = readl_relaxed(its->base + GITS_CREADR); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) next_cmd = its_post_commands(its); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) raw_spin_unlock_irqrestore(&its->lock, flags); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static void its_build_sync_cmd(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct its_cmd_block *sync_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct its_collection *sync_col)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) its_encode_target(sync_cmd, sync_col->target_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) its_fixup_cmd(sync_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) struct its_collection, its_build_sync_cmd)
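/*
 * For reference, the instantiation above expands to (roughly):
 *
 *	static void its_send_single_command(struct its_node *its,
 *					    its_cmd_builder_t builder,
 *					    struct its_cmd_desc *desc)
 *
 * i.e. allocate a slot, run the builder, flush, optionally chase the
 * command with a SYNC on the collection the builder returned, then
 * post and wait for the whole range to complete.
 */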
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static void its_build_vsync_cmd(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) struct its_cmd_block *sync_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct its_vpe *sync_vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) its_fixup_cmd(sync_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct its_vpe, its_build_vsync_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) static void its_send_int(struct its_device *dev, u32 event_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) desc.its_int_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) desc.its_int_cmd.event_id = event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) its_send_single_command(dev->its, its_build_int_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static void its_send_clear(struct its_device *dev, u32 event_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) desc.its_clear_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) desc.its_clear_cmd.event_id = event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) its_send_single_command(dev->its, its_build_clear_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static void its_send_inv(struct its_device *dev, u32 event_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) desc.its_inv_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) desc.its_inv_cmd.event_id = event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) its_send_single_command(dev->its, its_build_inv_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) static void its_send_mapd(struct its_device *dev, int valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) desc.its_mapd_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) desc.its_mapd_cmd.valid = !!valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static void its_send_mapc(struct its_node *its, struct its_collection *col,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) int valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) desc.its_mapc_cmd.col = col;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) desc.its_mapc_cmd.valid = !!valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) its_send_single_command(its, its_build_mapc_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) desc.its_mapti_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) desc.its_mapti_cmd.phys_id = irq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) desc.its_mapti_cmd.event_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static void its_send_movi(struct its_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct its_collection *col, u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) desc.its_movi_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) desc.its_movi_cmd.col = col;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) desc.its_movi_cmd.event_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) its_send_single_command(dev->its, its_build_movi_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static void its_send_discard(struct its_device *dev, u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) desc.its_discard_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) desc.its_discard_cmd.event_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) its_send_single_command(dev->its, its_build_discard_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static void its_send_invall(struct its_node *its, struct its_collection *col)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) desc.its_invall_cmd.col = col;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) its_send_single_command(its, its_build_invall_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static void its_send_vmapti(struct its_device *dev, u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) desc.its_vmapti_cmd.vpe = map->vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) desc.its_vmapti_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) desc.its_vmapti_cmd.virt_id = map->vintid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) desc.its_vmapti_cmd.event_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) desc.its_vmapti_cmd.db_enabled = map->db_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) static void its_send_vmovi(struct its_device *dev, u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) desc.its_vmovi_cmd.vpe = map->vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) desc.its_vmovi_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) desc.its_vmovi_cmd.event_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) desc.its_vmovi_cmd.db_enabled = map->db_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static void its_send_vmapp(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct its_vpe *vpe, bool valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) desc.its_vmapp_cmd.vpe = vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) desc.its_vmapp_cmd.valid = valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static void its_send_vmovp(struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct its_cmd_desc desc = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) int col_id = vpe->col_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) desc.its_vmovp_cmd.vpe = vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (!its_list_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) its = list_first_entry(&its_nodes, struct its_node, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) desc.its_vmovp_cmd.col = &its->collections[col_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * Yet another marvel of the architecture. If using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * its_list "feature", we need to make sure that all ITSs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * receive all VMOVP commands in the same order. The only way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * to guarantee this is to make vmovp a serialization point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * Wall <-- Head.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) raw_spin_lock_irqsave(&vmovp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* Emit VMOVPs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) list_for_each_entry(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (!is_v4(its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (!require_its_list_vmovp(vpe->its_vm, its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) desc.its_vmovp_cmd.col = &its->collections[col_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) raw_spin_unlock_irqrestore(&vmovp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
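/*
 * Sketch of why vmovp_lock matters: with several ITSs sharing an
 * its_list, every ITS must observe vPE moves in the same order. The
 * global sequence number (vmovp_seq_num) is therefore allocated and
 * the whole batch of per-ITS VMOVPs emitted under a single critical
 * section, making the move one serialization point.
 */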
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) desc.its_vinvall_cmd.vpe = vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static void its_send_vinv(struct its_device *dev, u32 event_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * There is no real VINV command. This is just a normal INV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * with a VSYNC instead of a SYNC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) desc.its_inv_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) desc.its_inv_cmd.event_id = event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static void its_send_vint(struct its_device *dev, u32 event_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * There is no real VINT command. This is just a normal INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * with a VSYNC instead of a SYNC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) desc.its_int_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) desc.its_int_cmd.event_id = event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) static void its_send_vclear(struct its_device *dev, u32 event_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * There is no real VCLEAR command. This is just a normal CLEAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * with a VSYNC instead of a SYNC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) desc.its_clear_cmd.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) desc.its_clear_cmd.event_id = event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) desc.its_invdb_cmd.vpe = vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * irqchip functions - assumes MSI, mostly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct its_vlpi_map *map = get_vlpi_map(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) irq_hw_number_t hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) void *va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) u8 *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) va = page_address(map->vm->vprop_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) hwirq = map->vintid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* Remember the updated property */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) map->properties &= ~clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) map->properties |= set | LPI_PROP_GROUP1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) va = gic_rdists->prop_table_va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) hwirq = d->hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) cfg = va + hwirq - 8192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) *cfg &= ~clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) *cfg |= set | LPI_PROP_GROUP1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * Make the above write visible to the redistributors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * And yes, we're flushing exactly: One. Single. Byte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * Humpf...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) dsb(ishst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
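/*
 * Worked example for the pointer arithmetic above: LPI INTIDs start
 * at 8192, and the property table holds one configuration byte per
 * LPI, so the byte for hwirq 8196 lives at offset 4 from the table
 * base. In that byte, bit 0 is LPI_PROP_ENABLED, bit 1 is
 * LPI_PROP_GROUP1 and bits 7:2 hold the priority.
 */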
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) static void wait_for_syncr(void __iomem *rdbase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static void direct_lpi_inv(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct its_vlpi_map *map = get_vlpi_map(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) void __iomem *rdbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) WARN_ON(!is_v4_1(its_dev->its));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) val = GICR_INVLPIR_V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) val = d->hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) /* Target the redistributor this LPI is currently routed to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) cpu = irq_to_cpuid_lock(d, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) gic_write_lpir(val, rdbase + GICR_INVLPIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) wait_for_syncr(rdbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) irq_to_cpuid_unlock(d, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
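/*
 * Note on the invalidation above: for a VLPI on GICv4.1 the
 * GICR_INVLPIR write carries V=1 plus the vPE ID and vINTID built
 * earlier, while a plain LPI just uses its physical INTID. Either
 * way, the invalidation is only guaranteed to have completed once
 * GICR_SYNCR polls back as idle.
 */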
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) lpi_write_config(d, clr, set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (gic_rdists->has_direct_lpi &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) direct_lpi_inv(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) else if (!irqd_is_forwarded_to_vcpu(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) its_send_inv(its_dev, its_get_event_id(d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) its_send_vinv(its_dev, its_get_event_id(d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) u32 event = its_get_event_id(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) struct its_vlpi_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * GICv4.1 does away with the per-LPI nonsense, nothing to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) * here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (is_v4_1(its_dev->its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) map = dev_event_to_vlpi_map(its_dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (map->db_enabled == enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) map->db_enabled = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * More fun with the architecture:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * value or to 1023, depending on the enable bit. But that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * would be issuing a mapping for an /existing/ DevID+EventID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) * to the /same/ vPE, using this opportunity to adjust the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) * doorbell. Mouahahahaha. We loves it, Precious.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) its_send_vmovi(its_dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) static void its_mask_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (irqd_is_forwarded_to_vcpu(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) its_vlpi_set_doorbell(d, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) lpi_update_config(d, LPI_PROP_ENABLED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) static void its_unmask_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (irqd_is_forwarded_to_vcpu(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) its_vlpi_set_doorbell(d, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) lpi_update_config(d, 0, LPI_PROP_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
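/*
 * In both paths above, the property byte is what actually masks or
 * unmasks the LPI: lpi_update_config() rewrites LPI_PROP_ENABLED in
 * the (v)prop table and then issues an INV (or a direct GICR_INVLPIR
 * write) so the redistributor reloads the configuration.
 */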
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (irqd_affinity_is_managed(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static void its_inc_lpi_count(struct irq_data *d, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (irqd_affinity_is_managed(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) static void its_dec_lpi_count(struct irq_data *d, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (irqd_affinity_is_managed(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) const struct cpumask *cpu_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) unsigned int cpu = nr_cpu_ids, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) int count = S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) for_each_cpu(tmp, cpu_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) int this_count = its_read_lpi_count(d, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (this_count < count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) cpu = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) count = this_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
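/*
 * Hypothetical example of the selection above: with the mask covering
 * CPU0 (3 LPIs of this kind) and CPU1 (1 LPI), CPU1 wins; if the mask
 * contains no CPU at all, the initial nr_cpu_ids is returned and the
 * caller must treat it as "no CPU found".
 */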
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) * As suggested by Thomas Gleixner in:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) static int its_select_cpu(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) const struct cpumask *aff_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) cpumask_var_t tmpmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) int cpu, node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) node = its_dev->its->numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (!irqd_affinity_is_managed(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) /* First try the NUMA node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (node != NUMA_NO_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * Try the intersection of the affinity mask and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * node mask (and the online mask, just to be safe).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) cpumask_and(tmpmask, tmpmask, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) * Ideally, we would check if the mask is empty, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * try again on the full node here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * But it turns out that the way ACPI describes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * affinity for ITSs only deals with memory, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * not target CPUs, so it cannot describe a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * ITS placed next to two NUMA nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * Instead, just fall back on the online mask. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) * diverges from Thomas' suggestion above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) cpu = cpumask_pick_least_loaded(d, tmpmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (cpu < nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) /* If we can't cross sockets, give up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) /* If the above failed, expand the search */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /* Try the intersection of the affinity and online masks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) cpumask_and(tmpmask, aff_mask, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) /* If that doesn't fly, the online mask is the last resort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (cpumask_empty(tmpmask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) cpumask_copy(tmpmask, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) cpu = cpumask_pick_least_loaded(d, tmpmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /* If we cannot cross sockets, limit the search to that node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) node != NUMA_NO_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) cpu = cpumask_pick_least_loaded(d, tmpmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) free_cpumask_var(tmpmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct its_collection *target_col;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) u32 id = its_get_event_id(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) int cpu, prev_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) /* A forwarded interrupt should use irq_set_vcpu_affinity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (irqd_is_forwarded_to_vcpu(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) prev_cpu = its_dev->event_map.col_map[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) its_dec_lpi_count(d, prev_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (!force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) cpu = its_select_cpu(d, mask_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) cpu = cpumask_pick_least_loaded(d, mask_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (cpu < 0 || cpu >= nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) /* Don't set the affinity when the target CPU is the same as the current one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (cpu != prev_cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) target_col = &its_dev->its->collections[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) its_send_movi(its_dev, target_col, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) its_dev->event_map.col_map[id] = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) irq_data_update_effective_affinity(d, cpumask_of(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) its_inc_lpi_count(d, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return IRQ_SET_MASK_OK_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) its_inc_lpi_count(d, prev_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) static u64 its_irq_get_msi_base(struct its_device *its_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) struct its_node *its = its_dev->its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) return its->phys_base + GITS_TRANSLATER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) its = its_dev->its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) addr = its->get_msi_base(its_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) msg->address_lo = lower_32_bits(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) msg->address_hi = upper_32_bits(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) msg->data = its_get_event_id(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
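^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * Net effect of the composition above (editor's note): the device ends
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * up writing its event ID to this ITS's GITS_TRANSLATER doorbell; paired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) * with the bus-provided DeviceID, that is the (DeviceID, EventID) tuple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) * the ITS translates into an LPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) */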
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) static int its_irq_set_irqchip_state(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) enum irqchip_irq_state which,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) bool state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) u32 event = its_get_event_id(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (which != IRQCHIP_STATE_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (irqd_is_forwarded_to_vcpu(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) its_send_vint(its_dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) its_send_vclear(its_dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) its_send_int(its_dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) its_send_clear(its_dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) static int its_irq_retrigger(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) * Two favourable cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * (a) We have a GICv4.1, and all vPEs have to be mapped at all times
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) *     for vSGI delivery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * (b) The ITSs do not use a list map, meaning that VMOVP is cheap enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) *     and we're better off always mapping all vPEs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) * If neither (a) nor (b) is true, then we map vPEs on demand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) static bool gic_requires_eager_mapping(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (!its_list_map || gic_rdists->has_rvpeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static void its_map_vm(struct its_node *its, struct its_vm *vm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (gic_requires_eager_mapping())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) raw_spin_lock_irqsave(&vmovp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * If the VM wasn't mapped yet, iterate over the vPEs and get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * them mapped now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) vm->vlpi_count[its->list_nr]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (vm->vlpi_count[its->list_nr] == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) for (i = 0; i < vm->nr_vpes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) struct its_vpe *vpe = vm->vpes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct irq_data *d = irq_get_irq_data(vpe->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) /* Map the VPE to the first possible CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) vpe->col_idx = cpumask_first(cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) its_send_vmapp(its, vpe, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) its_send_vinvall(its, vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) raw_spin_unlock_irqrestore(&vmovp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /* Not using the ITS list? Everything is always mapped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (gic_requires_eager_mapping())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) raw_spin_lock_irqsave(&vmovp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (!--vm->vlpi_count[its->list_nr]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) for (i = 0; i < vm->nr_vpes; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) its_send_vmapp(its, vm->vpes[i], false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) raw_spin_unlock_irqrestore(&vmovp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) u32 event = its_get_event_id(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (!info->map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) raw_spin_lock(&its_dev->event_map.vlpi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (!its_dev->event_map.vm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct its_vlpi_map *maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (!maps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) its_dev->event_map.vm = info->map->vm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) its_dev->event_map.vlpi_maps = maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) } else if (its_dev->event_map.vm != info->map->vm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) /* Get our private copy of the mapping information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) its_dev->event_map.vlpi_maps[event] = *info->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) if (irqd_is_forwarded_to_vcpu(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /* Already mapped, move it around */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) its_send_vmovi(its_dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) /* Ensure all the VPEs are mapped on this ITS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) its_map_vm(its_dev->its, info->map->vm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) * Flag the interrupt as forwarded so that we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) * start poking the virtual property table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) irqd_set_forwarded_to_vcpu(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /* Write out the property to the prop table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) lpi_write_config(d, 0xff, info->map->properties);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /* Drop the physical mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) its_send_discard(its_dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) /* and install the virtual one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) its_send_vmapti(its_dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /* Increment the number of VLPIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) its_dev->event_map.nr_vlpis++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) raw_spin_unlock(&its_dev->event_map.vlpi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) struct its_vlpi_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) raw_spin_lock(&its_dev->event_map.vlpi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) map = get_vlpi_map(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (!its_dev->event_map.vm || !map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) /* Copy our mapping information to the incoming request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) *info->map = *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) raw_spin_unlock(&its_dev->event_map.vlpi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) static int its_vlpi_unmap(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) u32 event = its_get_event_id(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) raw_spin_lock(&its_dev->event_map.vlpi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /* Drop the virtual mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) its_send_discard(its_dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) /* and restore the physical one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) irqd_clr_forwarded_to_vcpu(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) its_send_mapti(its_dev, d->hwirq, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) LPI_PROP_ENABLED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) LPI_PROP_GROUP1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) /* Potentially unmap the VM from this ITS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) its_unmap_vm(its_dev->its, its_dev->event_map.vm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) * Drop the refcount and make the device available again if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * this was the last VLPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (!--its_dev->event_map.nr_vlpis) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) its_dev->event_map.vm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) kfree(its_dev->event_map.vlpi_maps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) raw_spin_unlock(&its_dev->event_map.vlpi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) lpi_update_config(d, 0xff, info->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) lpi_write_config(d, 0xff, info->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) struct its_cmd_info *info = vcpu_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) /* Need a v4 ITS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (!is_v4(its_dev->its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) /* Unmap request? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) return its_vlpi_unmap(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) switch (info->cmd_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) case MAP_VLPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) return its_vlpi_map(d, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) case GET_VLPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) return its_vlpi_get(d, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) case PROP_UPDATE_VLPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) case PROP_UPDATE_AND_INV_VLPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return its_vlpi_prop_update(d, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) static struct irq_chip its_irq_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) .name = "ITS",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) .irq_mask = its_mask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) .irq_unmask = its_unmask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) .irq_eoi = irq_chip_eoi_parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) .irq_set_affinity = its_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) .irq_compose_msi_msg = its_irq_compose_msi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) .irq_set_irqchip_state = its_irq_set_irqchip_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) .irq_retrigger = its_irq_retrigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * How we allocate LPIs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * lpi_range_list contains ranges of LPIs that are available to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * allocate from. To allocate LPIs, just pick the first range that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * fits the required allocation, and reduce it by the required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * amount. Once empty, remove the range from the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * To free a range of LPIs, add a free range to the list, sort it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * merge the result if the new range happens to be adjacent to an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * already free block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * The consequence of the above is that allocation cost is low, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * freeing is expensive. We assume that freeing rarely occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) */
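^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) * Worked example (editor's sketch, not part of the driver): assume the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) * free list holds the single range [8192, 8192+57344). A request for 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) * LPIs returns base 8192 and shrinks the range to start at 8224; freeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) * those LPIs later re-inserts [8192, 8223], which merge_lpi_ranges()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * folds back into the adjacent block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) *	u32 base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) *	if (!alloc_lpi_range(32, &base))	// base == 8192 here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) *		free_lpi_range(base, 32);	// range merged back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) */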
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) static DEFINE_MUTEX(lpi_range_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) static LIST_HEAD(lpi_range_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) struct lpi_range {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) struct list_head entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) u32 base_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) u32 span;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) static struct lpi_range *mk_lpi_range(u32 base, u32 span)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) struct lpi_range *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) range = kmalloc(sizeof(*range), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if (range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) range->base_id = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) range->span = span;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) static int alloc_lpi_range(u32 nr_lpis, u32 *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) struct lpi_range *range, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) int err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) mutex_lock(&lpi_range_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (range->span >= nr_lpis) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) *base = range->base_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) range->base_id += nr_lpis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) range->span -= nr_lpis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (range->span == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) list_del(&range->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) kfree(range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) mutex_unlock(&lpi_range_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (a->base_id + a->span != b->base_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) b->base_id = a->base_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) b->span += a->span;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) list_del(&a->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) kfree(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) static int free_lpi_range(u32 base, u32 nr_lpis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) struct lpi_range *new, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) new = mk_lpi_range(base, nr_lpis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) mutex_lock(&lpi_range_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) list_for_each_entry_reverse(old, &lpi_range_list, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (old->base_id < base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) * old is the last element with ->base_id smaller than base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) * so new goes right after it. If there are no elements with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * ->base_id smaller than base, &old->entry ends up pointing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) * at the head of the list, and inserting new at the start of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * the list is the right thing to do in that case as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) list_add(&new->entry, &old->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) * Now check if we can merge with the preceding and/or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) * following ranges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) merge_lpi_ranges(old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) merge_lpi_ranges(new, list_next_entry(new, entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) mutex_unlock(&lpi_range_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) static int __init its_lpi_init(u32 id_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) u32 lpis = (1UL << id_bits) - 8192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) u32 numlpis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) lpis = numlpis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) lpis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) * Initializing the allocator is just the same as freeing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) * full range of LPIs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) err = free_lpi_range(8192, lpis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) unsigned long *bitmap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) err = alloc_lpi_range(nr_irqs, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) nr_irqs /= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) } while (nr_irqs > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (!nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof(long), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (!bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) *nr_ids = nr_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (!bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) *base = *nr_ids = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) return bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
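^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) * Usage sketch (editor's illustration): a device-allocation path would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) * typically carve out its event bitmap like this, where nvecs is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) * number of vectors the caller wants:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) *	int nr_lpis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) *	u32 lpi_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) *	unsigned long *lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) * Since the allocator halves the request until it fits, nr_lpis may come
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) * back smaller than nvecs; a NULL return means the allocation failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) * altogether.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) */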
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) WARN_ON(free_lpi_range(base, nr_ids));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) kfree(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) static void gic_reset_prop_table(void *va)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) /* Priority 0xa0, Group-1, disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) /* Make sure the GIC will observe the written configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
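^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) * Editor's note on the encoding above: each LPI owns one byte in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) * property table. With the LPI_PROP_* constants used by this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) * (priority in the upper bits, group in bit 1, enable in bit 0), the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) * reset value is 0xa2: priority 0xa0, Group-1, enable bit clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) */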
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) static struct page *its_allocate_prop_table(gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) struct page *prop_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) gfp_flags |= GFP_DMA32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (!prop_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) gic_reset_prop_table(page_address(prop_page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) return prop_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) static void its_free_prop_table(struct page *prop_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) free_pages((unsigned long)page_address(prop_page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) get_order(LPI_PROPBASE_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) phys_addr_t start, end, addr_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) u64 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) * We don't bother checking for a kdump kernel as, by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) * construction, the LPI tables are outside this kernel's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) * memory map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) if (is_kdump_kernel())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) addr_end = addr + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) for_each_reserved_mem_range(i, &start, &end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) if (addr >= start && addr_end <= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) /* Not found, not a good sign... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) &addr, &addr_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) static int gic_reserve_range(phys_addr_t addr, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) if (efi_enabled(EFI_CONFIG_TABLES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) return efi_mem_reserve_persistent(addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) static int __init its_setup_lpi_prop_table(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) LPI_PROPBASE_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) MEMREMAP_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) gic_reset_prop_table(gic_rdists->prop_table_va);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) lpi_id_bits = min_t(u32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) ITS_MAX_LPI_NRBITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) page = its_allocate_prop_table(GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) pr_err("Failed to allocate PROPBASE\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) gic_rdists->prop_table_pa = page_to_phys(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) gic_rdists->prop_table_va = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) LPI_PROPBASE_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) pr_info("GICv3: using LPI property table @%pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) &gic_rdists->prop_table_pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) return its_lpi_init(lpi_id_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) static const char *its_base_type_string[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) [GITS_BASER_TYPE_DEVICE] = "Devices",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) u32 idx = baser - its->tables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) return gits_read_baser(its->base + GITS_BASER + (idx << 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) static void its_write_baser(struct its_node *its, struct its_baser *baser,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) u32 idx = baser - its->tables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) baser->val = its_read_baser(its, baser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) static int its_setup_baser(struct its_node *its, struct its_baser *baser,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) u64 cache, u64 shr, u32 order, bool indirect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) u64 val = its_read_baser(its, baser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) u64 esz = GITS_BASER_ENTRY_SIZE(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) u64 type = GITS_BASER_TYPE(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) u64 baser_phys, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) u32 alloc_pages, psz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) void *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) gfp_t gfp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) psz = baser->psz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (alloc_pages > GITS_BASER_PAGES_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) &its->phys_base, its_base_type_string[type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) alloc_pages, GITS_BASER_PAGES_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) alloc_pages = GITS_BASER_PAGES_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) order = get_order(GITS_BASER_PAGES_MAX * psz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) gfp_flags = GFP_KERNEL | __GFP_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) gfp_flags |= GFP_DMA32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) page = alloc_pages_node(its->numa_node, gfp_flags, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) base = (void *)page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) baser_phys = virt_to_phys(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) /* Check if the physical address of the memory is above 48 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) /* A 52-bit PA is only supported when PageSize=64K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) if (psz != SZ_64K) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) free_pages((unsigned long)base, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) /* Convert the 52-bit PA to the 48-bit field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) }
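^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) * Editor's sketch of the packing above: with 64K pages, register bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) * [15:12] are otherwise always zero, so the architecture reuses them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) * to carry PA bits [51:48], roughly:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) *	baser_phys = (pa & GENMASK_ULL(47, 16)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) *		     (((pa >> 48) & 0xf) << 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) */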
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) retry_baser:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) val = (baser_phys |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) (type << GITS_BASER_TYPE_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) cache |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) shr |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) GITS_BASER_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) val |= indirect ? GITS_BASER_INDIRECT : 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) switch (psz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) case SZ_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) val |= GITS_BASER_PAGE_SIZE_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) case SZ_16K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) val |= GITS_BASER_PAGE_SIZE_16K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) case SZ_64K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) val |= GITS_BASER_PAGE_SIZE_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) its_write_baser(its, baser, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) tmp = baser->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) if (IS_ENABLED(CONFIG_NO_GKI) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) (of_machine_is_compatible("rockchip,rk3568") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) of_machine_is_compatible("rockchip,rk3566") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) of_machine_is_compatible("rockchip,rk3588"))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (tmp & GITS_BASER_SHAREABILITY_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) tmp &= ~GITS_BASER_SHAREABILITY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) * Shareability didn't stick. Just use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) * whatever the read reported, which is likely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) * to be the only thing this ITS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) * supports. If that's zero, make it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) * non-cacheable as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) shr = tmp & GITS_BASER_SHAREABILITY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if (!shr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) cache = GITS_BASER_nC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) goto retry_baser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if (val != tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) &its->phys_base, its_base_type_string[type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) val, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) free_pages((unsigned long)base, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) baser->order = order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) baser->base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) baser->psz = psz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) its_base_type_string[type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) (unsigned long)virt_to_phys(base),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) indirect ? "indirect" : "flat", (int)esz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) static bool its_parse_indirect_baser(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) struct its_baser *baser,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) u32 *order, u32 ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) u64 tmp = its_read_baser(its, baser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) u64 type = GITS_BASER_TYPE(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) u32 new_order = *order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) u32 psz = baser->psz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) bool indirect = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) /* No need to enable indirection if the memory requirement is < (psz * 2) bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) if ((esz << ids) > (psz * 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) * Find out whether the hardware supports a single or two-level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) * table by reading the bit at offset '62' after writing '1' to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) indirect = !!(baser->val & GITS_BASER_INDIRECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) if (indirect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) * The size of a lvl2 table is equal to the ITS page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) * size, 'psz'. To compute the lvl1 table size, subtract
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) * the number of ID bits resolved by a single lvl2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) * table, ilog2(psz / esz), from 'ids', and multiply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) * by the lvl1 entry size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) ids -= ilog2(psz / (int)esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) esz = GITS_LVL1_ENTRY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) }
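^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) * Worked example (editor's note): with psz = SZ_64K, esz = 8 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) * ids = 20, a single lvl2 page resolves ilog2(65536 / 8) = 13 ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) * bits, so the lvl1 table needs 2^(20 - 13) = 128 entries of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) * GITS_LVL1_ENTRY_SIZE (8 bytes) each: 1KB instead of an 8MB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) * flat table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) */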
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) * Allocate as many entries as required to fit the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) * range of device IDs that the ITS can grok... The ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) * space being incredibly sparse, this results in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	 * massive waste of memory if the two-level device table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	 * feature is not supported by the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) new_order = max_t(u32, get_order(esz << ids), new_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (new_order >= MAX_ORDER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) new_order = MAX_ORDER - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) &its->phys_base, its_base_type_string[type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) device_ids(its), ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) *order = new_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) return indirect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) }
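
/*
 * Worked example of the sizing above (illustrative numbers, not taken
 * from any particular implementation): with 64K ITS pages (psz) and an
 * 8-byte entry (esz), a flat table covering ids = 20 DeviceID bits
 * needs esz << ids = 8MB. With indirection, one lvl2 page maps
 * psz / esz = 8192 IDs (13 bits), so the lvl1 table only covers the
 * remaining 7 bits: 128 entries of GITS_LVL1_ENTRY_SIZE (8 bytes),
 * i.e. 1KB, with lvl2 pages allocated on demand as devices appear.
 */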
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) static u32 compute_common_aff(u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) u32 aff, clpiaff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
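
/*
 * Example of the masking above, per the GICR_TYPER.CommonLPIAff
 * encoding: clpiaff == 0 clears all 32 affinity bits (every RD shares
 * the tables), clpiaff == 1 keeps Aff3 (bits [31:24]), clpiaff == 2
 * keeps Aff3.Aff2, and clpiaff == 3 keeps Aff3.Aff2.Aff1, since each
 * increment shortens the cleared GENMASK(31, 0) >> (clpiaff * 8) span
 * by one 8-bit affinity field.
 */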
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) static u32 compute_its_aff(struct its_node *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) u32 svpet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	 * the resulting affinity. We then use that to see if this matches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	 * our own affinity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) return compute_common_aff(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) static struct its_node *find_sibling_its(struct its_node *cur_its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) u32 aff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) aff = compute_its_aff(cur_its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) list_for_each_entry(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) u64 baser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) if (!is_v4_1(its) || its == cur_its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) if (aff != compute_its_aff(its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) baser = its->tables[2].val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) if (!(baser & GITS_BASER_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) return its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) static void its_free_tables(struct its_node *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) for (i = 0; i < GITS_BASER_NR_REGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) if (its->tables[i].base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) free_pages((unsigned long)its->tables[i].base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) its->tables[i].order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) its->tables[i].base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
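/*
 * Probe the largest ITS page size this BASER actually implements:
 * write each candidate (64K, then 16K, then 4K) into the page-size
 * field and read it back; a size the ITS doesn't support won't stick.
 */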
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) u64 psz = SZ_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) while (psz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) u64 val, gpsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) val = its_read_baser(its, baser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) val &= ~GITS_BASER_PAGE_SIZE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) switch (psz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) case SZ_64K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) gpsz = GITS_BASER_PAGE_SIZE_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) case SZ_16K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) gpsz = GITS_BASER_PAGE_SIZE_16K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) case SZ_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) gpsz = GITS_BASER_PAGE_SIZE_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) its_write_baser(its, baser, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) switch (psz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) case SZ_64K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) psz = SZ_16K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) case SZ_16K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) psz = SZ_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) case SZ_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) baser->psz = psz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) static int its_alloc_tables(struct its_node *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) u64 shr = GITS_BASER_InnerShareable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) u64 cache = GITS_BASER_RaWaWb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) /* erratum 24313: ignore memory access type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) cache = GITS_BASER_nCnB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) for (i = 0; i < GITS_BASER_NR_REGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) struct its_baser *baser = its->tables + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) u64 val = its_read_baser(its, baser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) u64 type = GITS_BASER_TYPE(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) bool indirect = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) u32 order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (type == GITS_BASER_TYPE_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) if (its_probe_baser_psz(its, baser)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) its_free_tables(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) order = get_order(baser->psz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) case GITS_BASER_TYPE_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) indirect = its_parse_indirect_baser(its, baser, &order,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) device_ids(its));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) case GITS_BASER_TYPE_VCPU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) if (is_v4_1(its)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) struct its_node *sibling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) WARN_ON(i != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) if ((sibling = find_sibling_its(its))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) *baser = sibling->tables[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) its_write_baser(its, baser, baser->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) indirect = its_parse_indirect_baser(its, baser, &order,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) ITS_MAX_VPEID_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) err = its_setup_baser(its, baser, cache, shr, order, indirect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) its_free_tables(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) /* Update settings which will be used for next BASERn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) static u64 inherit_vpe_l1_table_from_its(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) u32 aff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) aff = compute_common_aff(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) list_for_each_entry(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) u64 baser, addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) if (!is_v4_1(its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) if (aff != compute_its_aff(its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) baser = its->tables[2].val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) if (!(baser & GITS_BASER_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) /* We have a winner! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) gic_data_rdist()->vpe_l1_base = its->tables[2].base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) val = GICR_VPROPBASER_4_1_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) if (baser & GITS_BASER_INDIRECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) val |= GICR_VPROPBASER_4_1_INDIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) case GIC_PAGE_SIZE_64K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) addr = GITS_BASER_ADDR_48_to_52(baser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) addr = baser & GENMASK_ULL(47, 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) }
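
/*
 * Note on the address extraction above: in the 64K-page format,
 * GITS_BASER bits [15:12] carry physical address bits [51:48] (hence
 * the GITS_BASER_ADDR_48_to_52() accessor), while for 4K/16K pages
 * the address is simply bits [47:12].
 */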
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) u32 aff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) aff = compute_common_aff(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) if (!base || cpu == smp_processor_id())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) val = gic_read_typer(base + GICR_TYPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) if (aff != compute_common_aff(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) * At this point, we have a victim. This particular CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) * has already booted, and has an affinity that matches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) * Make sure we don't write the Z bit in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) val &= ~GICR_VPROPBASER_4_1_Z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) static bool allocate_vpe_l2_table(int cpu, u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) unsigned int psz, esz, idx, npg, gpsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) __le64 *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) if (!gic_rdists->has_rvpeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) /* Skip non-present CPUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) if (!base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) switch (gpsz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) case GIC_PAGE_SIZE_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) psz = SZ_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) case GIC_PAGE_SIZE_16K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) psz = SZ_16K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) case GIC_PAGE_SIZE_64K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) psz = SZ_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) /* Don't allow vpe_id that exceeds single, flat table limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) return (id < (npg * psz / (esz * SZ_8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) /* Compute 1st level table index & check if that exceeds table limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) idx = id >> ilog2(psz / (esz * SZ_8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) /* Allocate memory for 2nd level table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) if (!table[idx]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) /* Flush Lvl2 table to PoC if hw doesn't support coherency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) gic_flush_dcache_to_poc(page_address(page), psz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) /* Ensure updated table contents are visible to RD hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) dsb(sy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) }
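
/*
 * Indexing sketch for the above (illustrative values):
 * GICR_VPROPBASER_4_1_ENTRY_SIZE is expressed in 64-bit words, hence
 * the esz * SZ_8 terms. With psz = SZ_64K and esz = 2 (16-byte vPE
 * entries), one lvl2 page holds 4096 vPEs, so idx = id >> 12 and lvl1
 * entry 'idx' points at the on-demand lvl2 page covering vpe_id 'id'.
 */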
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) static int allocate_vpe_l1_table(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) u64 val, gpsz, npg, pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) unsigned int psz = SZ_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) unsigned int np, epp, esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) if (!gic_rdists->has_rvpeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	 * If VPENDBASER.Valid is set, disable any previously programmed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) * VPE by setting PendingLast while clearing Valid. This has the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) * effect of making sure no doorbell will be generated and we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) * then safely clear VPROPBASER.Valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) vlpi_base + GICR_VPENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) * If we can inherit the configuration from another RD, let's do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) * so. Otherwise, we have to go through the allocation process. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) * assume that all RDs have the exact same requirements, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) * nothing will work otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) if (val & GICR_VPROPBASER_4_1_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) if (!gic_data_rdist()->vpe_table_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) val = inherit_vpe_l1_table_from_its();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) if (val & GICR_VPROPBASER_4_1_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) /* First probe the page size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) switch (gpsz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) gpsz = GIC_PAGE_SIZE_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) case GIC_PAGE_SIZE_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) psz = SZ_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) case GIC_PAGE_SIZE_16K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) psz = SZ_16K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) case GIC_PAGE_SIZE_64K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) psz = SZ_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) * Start populating the register from scratch, including RO fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) * (which we want to print in debug cases...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) /* How many entries per GIC page? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) esz++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) epp = psz / (esz * SZ_8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) * If we need more than just a single L1 page, flag the table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) * as indirect and compute the number of required L1 pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) if (epp < ITS_MAX_VPEID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) int nl2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) val |= GICR_VPROPBASER_4_1_INDIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) /* Number of L2 pages required to cover the VPEID space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) /* Number of L1 pages to point to the L2 pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) npg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) }
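	/*
	 * Sizing sketch (illustrative numbers): with psz = SZ_64K and
	 * 16-byte vPE entries, epp = 4096. Covering ITS_MAX_VPEID = 64K
	 * vPEs then takes nl2 = 16 lvl2 pages, whose 16 * SZ_8 = 128
	 * bytes of lvl1 pointers fit comfortably in npg = 1 page.
	 */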
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) /* Right, that's the number of CPU pages we need for L1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) np, npg, psz, epp, esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) gic_data_rdist()->vpe_l1_base = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) pa = virt_to_phys(page_address(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) WARN_ON(!IS_ALIGNED(pa, psz));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) val |= GICR_VPROPBASER_RaWb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) val |= GICR_VPROPBASER_InnerShareable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) val |= GICR_VPROPBASER_4_1_Z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) val |= GICR_VPROPBASER_4_1_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) smp_processor_id(), val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) static int its_alloc_collections(struct its_node *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) if (!its->collections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) for (i = 0; i < nr_cpu_ids; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) its->collections[i].target_address = ~0ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) static struct page *its_allocate_pending_table(gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) struct page *pend_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
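	/*
	 * RK3566/RK3568 quirk: the ITS on these SoCs reportedly cannot
	 * address memory above 4GB, so keep the pending table in DMA32.
	 */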
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) gfp_flags |= GFP_DMA32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) get_order(LPI_PENDBASE_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) if (!pend_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	/* Make sure the GIC will observe the zeroed page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) return pend_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) static void its_free_pending_table(struct page *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) * Booting with kdump and LPIs enabled is generally fine. Any other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) * case is wrong in the absence of firmware/EFI support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) static bool enabled_lpis_allowed(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) phys_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) /* Check whether the property table is in a reserved region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) addr = val & GENMASK_ULL(51, 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) static int __init allocate_lpi_tables(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) int err, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) * If LPIs are enabled while we run this from the boot CPU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) * flag the RD tables as pre-allocated if the stars do align.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) pr_info("GICv3: Using preallocated redistributor tables\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) err = its_setup_lpi_prop_table();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) * We allocate all the pending tables anyway, as we may have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) * mix of RDs that have had LPIs enabled, and some that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) * don't. We'll free the unused ones as each CPU comes online.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) struct page *pend_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) pend_page = its_allocate_pending_table(GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (!pend_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) gic_data_rdist_cpu(cpu)->pend_page = pend_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022)
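/*
 * Clear GICR_VPENDBASER.Valid and poll for Dirty to drop, giving the
 * RD roughly a second to finish parsing the virtual pending table. If
 * it never cleans, presumably the safest course is to report
 * PendingLast, trading a spurious doorbell for lost pending state.
 */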
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) u32 count = 1000000; /* 1s! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) bool clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) val &= ~GICR_VPENDBASER_Valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) val &= ~clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) val |= set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) clean = !(val & GICR_VPENDBASER_Dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) if (!clean) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) } while (!clean && count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) if (unlikely(val & GICR_VPENDBASER_Dirty)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) pr_err_ratelimited("ITS virtual pending table not cleaning\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) val |= GICR_VPENDBASER_PendingLast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) static void its_cpu_init_lpis(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) void __iomem *rbase = gic_data_rdist_rd_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) struct page *pend_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) phys_addr_t paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) u64 val, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) if (gic_data_rdist()->lpi_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) val = readl_relaxed(rbase + GICR_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) (val & GICR_CTLR_ENABLE_LPIS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) * Check that we get the same property table on all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) * RDs. If we don't, this is hopeless.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) paddr &= GENMASK_ULL(51, 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) if (WARN_ON(gic_rdists->prop_table_pa != paddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) paddr &= GENMASK_ULL(51, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) its_free_pending_table(gic_data_rdist()->pend_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) gic_data_rdist()->pend_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) pend_page = gic_data_rdist()->pend_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) paddr = page_to_phys(pend_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) /* set PROPBASE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) val = (gic_rdists->prop_table_pa |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) GICR_PROPBASER_InnerShareable |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) GICR_PROPBASER_RaWaWb |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) gicr_write_propbaser(val, rbase + GICR_PROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) if (IS_ENABLED(CONFIG_NO_GKI) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) (of_machine_is_compatible("rockchip,rk3568") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) of_machine_is_compatible("rockchip,rk3566") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) of_machine_is_compatible("rockchip,rk3588")))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 			 * The HW reports non-shareable, so we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) * remove the cacheability attributes as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) * well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) GICR_PROPBASER_CACHEABILITY_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) val |= GICR_PROPBASER_nC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) gicr_write_propbaser(val, rbase + GICR_PROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) pr_info_once("GIC: using cache flushing for LPI property table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) /* set PENDBASE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) val = (page_to_phys(pend_page) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) GICR_PENDBASER_InnerShareable |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) GICR_PENDBASER_RaWaWb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) if (IS_ENABLED(CONFIG_NO_GKI) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) (of_machine_is_compatible("rockchip,rk3568") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) of_machine_is_compatible("rockchip,rk3566") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) of_machine_is_compatible("rockchip,rk3588")))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 		 * The HW reports non-shareable, so we must remove the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) * cacheability attributes as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) GICR_PENDBASER_CACHEABILITY_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) val |= GICR_PENDBASER_nC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) /* Enable LPIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) val = readl_relaxed(rbase + GICR_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) val |= GICR_CTLR_ENABLE_LPIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) writel_relaxed(val, rbase + GICR_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 		 * It's possible for a CPU to receive VLPIs before it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 		 * scheduled as a vPE, especially for the first CPU, and a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 		 * VLPI with an INTID larger than 2^(IDbits+1) will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 		 * considered out of range and dropped by the GIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 		 * So we initialize IDbits to a known value to avoid VLPI drops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) smp_processor_id(), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 		 * Also clear the Valid bit of GICR_VPENDBASER, in case some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 		 * stale programming was left behind and could end up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 		 * corrupting memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) val = its_clear_vpend_valid(vlpi_base, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) if (allocate_vpe_l1_table()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) * If the allocation has failed, we're in massive trouble.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) * Disable direct injection, and pray that no VM was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) * already running...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) gic_rdists->has_rvpeid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) gic_rdists->has_vlpis = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) /* Make sure the GIC has seen the above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) dsb(sy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) gic_data_rdist()->lpi_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) gic_data_rdist()->pend_page ? "allocated" : "reserved",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) &paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) static void its_cpu_init_collection(struct its_node *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) u64 target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 	/* Avoid cross-node collections and their mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) struct device_node *cpu_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) cpu_node = of_get_cpu_node(cpu, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) if (its->numa_node != NUMA_NO_NODE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) its->numa_node != of_node_to_nid(cpu_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) * We now have to bind each collection to its target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) * redistributor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) * This ITS wants the physical address of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) * redistributor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) target = gic_data_rdist()->phys_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) /* This ITS wants a linear CPU number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) target = GICR_TYPER_CPU_NUMBER(target) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) /* Perform collection mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) its->collections[cpu].target_address = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) its->collections[cpu].col_id = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) its_send_mapc(its, &its->collections[cpu], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) its_send_invall(its, &its->collections[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) }
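
/*
 * Example of the target encoding above: with GITS_TYPER.PTA == 1 the
 * MAPC target is the redistributor's physical base address; with
 * PTA == 0 it is the linear processor number from GICR_TYPER,
 * pre-shifted by 16 to line up with the RDbase field of the command.
 */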
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) static void its_cpu_init_collections(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) raw_spin_lock(&its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) list_for_each_entry(its, &its_nodes, entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) its_cpu_init_collection(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) raw_spin_unlock(&its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) struct its_device *its_dev = NULL, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) raw_spin_lock_irqsave(&its->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) list_for_each_entry(tmp, &its->its_device_list, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) if (tmp->device_id == dev_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) its_dev = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) raw_spin_unlock_irqrestore(&its->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) return its_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) static struct its_baser *its_get_baser(struct its_node *its, u32 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) for (i = 0; i < GITS_BASER_NR_REGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) if (GITS_BASER_TYPE(its->tables[i].val) == type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) return &its->tables[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) }
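/*
 * A short illustration of the lookup above: GITS_BASER_TYPE() extracts
 * the read-only Type field (bits [58:56]) of a cached GITS_BASER<n>
 * value, so, for instance:
 *
 *   baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
 *
 * returns the baser backing the Device table, or NULL if this ITS
 * doesn't expose a table of that type.
 */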
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) static bool its_alloc_table_entry(struct its_node *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) struct its_baser *baser, u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) u32 esz, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) __le64 *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
	/* Don't allow a device ID that exceeds the single, flat table limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) esz = GITS_BASER_ENTRY_SIZE(baser->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) if (!(baser->val & GITS_BASER_INDIRECT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286)
	/* Compute the 1st level table index and check it against the table limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) idx = id >> ilog2(baser->psz / esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) table = baser->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) /* Allocate memory for 2nd level table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) if (!table[idx]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
		if (of_machine_is_compatible("rockchip,rk3568") ||
		    of_machine_is_compatible("rockchip,rk3566"))
			gfp_flags |= GFP_DMA32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) page = alloc_pages_node(its->numa_node, gfp_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) get_order(baser->psz));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) /* Flush Lvl2 table to PoC if hw doesn't support coherency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) gic_flush_dcache_to_poc(page_address(page), baser->psz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) /* Ensure updated table contents are visible to ITS hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) dsb(sy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) }
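/*
 * Worked example for the two-level case above, with hypothetical
 * numbers: an indirect table with psz = SZ_64K and esz = 8 gives
 * 64K / 8 = 8192 IDs per L2 page, so:
 *
 *   idx = id >> ilog2(8192);	// id 20000 -> L1 entry 2
 *
 * and that L1 entry, once populated, holds the physical address of a
 * zeroed 64K L2 page with GITS_BASER_VALID set.
 */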
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) struct its_baser *baser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327)
	/* Don't allow a device ID that exceeds the ITS hardware limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) if (!baser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) return (ilog2(dev_id) < device_ids(its));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) return its_alloc_table_entry(its, baser, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) static bool its_alloc_vpe_table(u32 vpe_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) * Make sure the L2 tables are allocated on *all* v4 ITSs. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) * could try and only do it on ITSs corresponding to devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) * that have interrupts targeted at this VPE, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) * complexity becomes crazy (and you have tons of memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) * anyway, right?).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) list_for_each_entry(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) struct its_baser *baser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) if (!is_v4(its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) if (!baser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) if (!its_alloc_table_entry(its, baser, vpe_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360)
	/* Not v4.1? No need to iterate the RDs; bail out early. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) if (!gic_rdists->has_rvpeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) * Make sure the L2 tables are allocated for all copies of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) * the L1 table on *all* v4.1 RDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) if (!allocate_vpe_l2_table(cpu, vpe_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) int nvecs, bool alloc_lpis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) struct its_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) unsigned long *lpi_map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) u16 *col_map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) void *itt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) int lpi_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) int nr_lpis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) int nr_ites;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) int sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) gfp_t gfp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) if (!its_alloc_device_table(its, dev_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) if (WARN_ON(!is_power_of_2(nvecs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) nvecs = roundup_pow_of_two(nvecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) dev = kzalloc(sizeof(*dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) * Even if the device wants a single LPI, the ITT must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) * sized as a power of two (and you need at least one bit...).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) nr_ites = max(2, nvecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
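	/*
	 * Worked example with hypothetical numbers: nvecs = 32 and an
	 * 8-byte ITT entry (GITS_TYPER.ITT_entry_size encoding 7) give
	 * sz = 32 * 8 = 256; max(256, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1
	 * = 511 bytes, enough to carve a 256-byte-aligned ITT out of
	 * whatever kzalloc_node() returns.
	 */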
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) gfp_flags = GFP_KERNEL;
	if (of_machine_is_compatible("rockchip,rk3568") ||
	    of_machine_is_compatible("rockchip,rk3566"))
		gfp_flags |= GFP_DMA32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) itt = kzalloc_node(sz, gfp_flags, its->numa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) if (alloc_lpis) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) if (lpi_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) col_map = kcalloc(nr_lpis, sizeof(*col_map),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) nr_lpis = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) lpi_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) kfree(itt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) kfree(lpi_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) kfree(col_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) gic_flush_dcache_to_poc(itt, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) dev->its = its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) dev->itt = itt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) dev->nr_ites = nr_ites;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) dev->event_map.lpi_map = lpi_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) dev->event_map.col_map = col_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) dev->event_map.lpi_base = lpi_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) dev->event_map.nr_lpis = nr_lpis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) raw_spin_lock_init(&dev->event_map.vlpi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) dev->device_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) INIT_LIST_HEAD(&dev->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) raw_spin_lock_irqsave(&its->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) list_add(&dev->entry, &its->its_device_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) raw_spin_unlock_irqrestore(&its->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) /* Map device to its ITT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) its_send_mapd(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) return dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) static void its_free_device(struct its_device *its_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) raw_spin_lock_irqsave(&its_dev->its->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) list_del(&its_dev->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) kfree(its_dev->event_map.col_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) kfree(its_dev->itt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) kfree(its_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466)
	/* Find a free region of LPIs in lpi_map and allocate it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) idx = bitmap_find_free_region(dev->event_map.lpi_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) dev->event_map.nr_lpis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) get_count_order(nvecs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) *hwirq = dev->event_map.lpi_base + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) }
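/*
 * Note on the granularity above: bitmap_find_free_region() hands out
 * naturally aligned power-of-two regions, so a (hypothetical) call:
 *
 *   its_alloc_device_irq(dev, 3, &hwirq);	// get_count_order(3) == 2
 *
 * actually claims a block of 4 events, with *hwirq pointing at the
 * LPI mapped to the first event of that block.
 */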
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) int nvec, msi_alloc_info_t *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) struct its_device *its_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) struct msi_domain_info *msi_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) u32 dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) * We ignore "dev" entirely, and rely on the dev_id that has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) * been passed via the scratchpad. This limits this domain's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) * usefulness to upper layers that definitely know that they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) * are built on top of the ITS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) dev_id = info->scratchpad[0].ul;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) msi_info = msi_get_domain_info(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) its = msi_info->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) if (!gic_rdists->has_direct_lpi &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) vpe_proxy.dev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) vpe_proxy.dev->its == its &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) dev_id == vpe_proxy.dev->device_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) /* Bad luck. Get yourself a better implementation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) mutex_lock(&its->dev_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) its_dev = its_find_device(its, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) if (its_dev) {
		/*
		 * We have already seen this ID, probably through
		 * another alias (a PCI bridge of some sort). No need
		 * to create the device again.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) its_dev->shared = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) pr_debug("Reusing ITT for devID %x\n", dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) its_dev = its_create_device(its, dev_id, nvec, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) if (!its_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) mutex_unlock(&its->dev_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) info->scratchpad[0].ptr = its_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) }
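/*
 * The scratchpad is a small protocol between the MSI layers: callers
 * store the device ID in info->scratchpad[0].ul before .msi_prepare
 * runs, and its_msi_prepare() overwrites it with the its_device
 * pointer that its_irq_domain_alloc() later consumes. A simplified
 * sketch (not an actual call site):
 *
 *   info->scratchpad[0].ul = dev_id;
 *   its_msi_prepare(domain, dev, nvec, info);
 *   its_dev = info->scratchpad[0].ptr;
 */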
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) static struct msi_domain_ops its_msi_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) .msi_prepare = its_msi_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) static int its_irq_gic_domain_alloc(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) irq_hw_number_t hwirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) struct irq_fwspec fwspec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) if (irq_domain_get_of_node(domain->parent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) fwspec.fwnode = domain->parent->fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) fwspec.param_count = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) fwspec.param[0] = GIC_IRQ_TYPE_LPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) fwspec.param[1] = hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) fwspec.fwnode = domain->parent->fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) fwspec.param_count = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) fwspec.param[0] = hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) }
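/*
 * The two fwspec layouts built above, shown with their parameters:
 *
 *   DT parent:     { GIC_IRQ_TYPE_LPI, hwirq, IRQ_TYPE_EDGE_RISING }
 *                  (the usual 3-cell GICv3 binding)
 *   fwnode parent: { hwirq, IRQ_TYPE_EDGE_RISING }
 *
 * Either way, the parent domain ends up allocating an edge-rising LPI
 * for the given hwirq.
 */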
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) unsigned int nr_irqs, void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) msi_alloc_info_t *info = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) struct its_device *its_dev = info->scratchpad[0].ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) struct its_node *its = its_dev->its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) struct irq_data *irqd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) irq_hw_number_t hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) for (i = 0; i < nr_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) irq_domain_set_hwirq_and_chip(domain, virq + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) hwirq + i, &its_irq_chip, its_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) irqd = irq_get_irq_data(virq + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) irqd_set_single_target(irqd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) irqd_set_affinity_on_activate(irqd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) pr_debug("ID:%d pID:%d vID:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) (int)(hwirq + i - its_dev->event_map.lpi_base),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) (int)(hwirq + i), virq + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) static int its_irq_domain_activate(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) struct irq_data *d, bool reserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) u32 event = its_get_event_id(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) cpu = its_select_cpu(d, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) if (cpu < 0 || cpu >= nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) its_inc_lpi_count(d, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) its_dev->event_map.col_map[event] = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) irq_data_update_effective_affinity(d, cpumask_of(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) /* Map the GIC IRQ and event to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) its_send_mapti(its_dev, d->hwirq, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) static void its_irq_domain_deactivate(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) u32 event = its_get_event_id(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) /* Stop the delivery of interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) its_send_discard(its_dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) unsigned int nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) struct irq_data *d = irq_domain_get_irq_data(domain, virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) struct its_device *its_dev = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) struct its_node *its = its_dev->its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) bitmap_release_region(its_dev->event_map.lpi_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) its_get_event_id(irq_domain_get_irq_data(domain, virq)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) get_count_order(nr_irqs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) for (i = 0; i < nr_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) struct irq_data *data = irq_domain_get_irq_data(domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) virq + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) /* Nuke the entry in the domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) irq_domain_reset_irq_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) mutex_lock(&its->dev_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651)
	/*
	 * If all interrupts have been freed, start mopping the
	 * floor. This is conditioned on the device not being shared.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) if (!its_dev->shared &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) bitmap_empty(its_dev->event_map.lpi_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) its_dev->event_map.nr_lpis)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) its_lpi_free(its_dev->event_map.lpi_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) its_dev->event_map.lpi_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) its_dev->event_map.nr_lpis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) /* Unmap device/itt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) its_send_mapd(its_dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) its_free_device(its_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) mutex_unlock(&its->dev_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) irq_domain_free_irqs_parent(domain, virq, nr_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) static const struct irq_domain_ops its_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) .alloc = its_irq_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) .free = its_irq_domain_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) .activate = its_irq_domain_activate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) .deactivate = its_irq_domain_deactivate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) * This is insane.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) * likely), the only way to perform an invalidate is to use a fake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) * device to issue an INV command, implying that the LPI has first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) * been mapped to some event on that device. Since this is not exactly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) * cheap, we try to keep that mapping around as long as possible, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) * only issue an UNMAP if we're short on available slots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) * Broken by design(tm).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) * GICv4.1, on the other hand, mandates that we're able to invalidate
 * by writing to an MMIO register. It doesn't implement the whole of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) * DirectLPI, but that's good enough. And most of the time, we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) * even have to invalidate anything, as the redistributor can be told
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) * whether to generate a doorbell or not (we thus leave it enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) * always).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) */
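/*
 * For reference, the invalidation sequence this scheme boils down to
 * on such a GICv4.0 (a sketch of what the helpers below implement,
 * not new logic):
 *
 *   raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
 *   its_vpe_db_proxy_map_locked(vpe);	// MAPTI: db LPI -> proxy event
 *   its_send_inv(vpe_proxy.dev, vpe->vpe_proxy_event);
 *   raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
 */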
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) /* GICv4.1 doesn't use a proxy, so nothing to do here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) if (gic_rdists->has_rvpeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) /* Already unmapped? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) if (vpe->vpe_proxy_event == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) * We don't track empty slots at all, so let's move the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) * next_victim pointer if we can quickly reuse that slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) * instead of nuking an existing entry. Not clear that this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) * always a win though, and this might just generate a ripple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) * effect... Let's just hope VPEs don't migrate too often.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) if (vpe_proxy.vpes[vpe_proxy.next_victim])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) vpe_proxy.next_victim = vpe->vpe_proxy_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) vpe->vpe_proxy_event = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) /* GICv4.1 doesn't use a proxy, so nothing to do here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) if (gic_rdists->has_rvpeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) if (!gic_rdists->has_direct_lpi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) its_vpe_db_proxy_unmap_locked(vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) /* GICv4.1 doesn't use a proxy, so nothing to do here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) if (gic_rdists->has_rvpeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) /* Already mapped? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) if (vpe->vpe_proxy_event != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749)
	/* If the next victim slot is already occupied, kick the other VPE out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) if (vpe_proxy.vpes[vpe_proxy.next_victim])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) /* Map the new VPE instead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) vpe->vpe_proxy_event = vpe_proxy.next_victim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) struct its_collection *target_col;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) /* GICv4.1 doesn't use a proxy, so nothing to do here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) if (gic_rdists->has_rvpeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) if (gic_rdists->has_direct_lpi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) void __iomem *rdbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) wait_for_syncr(rdbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) its_vpe_db_proxy_map_locked(vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) target_col = &vpe_proxy.dev->its->collections[to];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) static int its_vpe_set_affinity(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) const struct cpumask *mask_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) int from, cpu = cpumask_first(mask_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) * Changing affinity is mega expensive, so let's be as lazy as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) * we can and only do it if we really have to. Also, if mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) * into the proxy device, we need to move the doorbell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) * interrupt to its new location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) * Another thing is that changing the affinity of a vPE affects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) * *other interrupts* such as all the vLPIs that are routed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) * this vPE. This means that the irq_desc lock is not enough to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) * protect us, and that we must ensure nobody samples vpe->col_idx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) * during the update, hence the lock below which must also be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) * taken on any vLPI handling path that evaluates vpe->col_idx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) from = vpe_to_cpuid_lock(vpe, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) if (from == cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) vpe->col_idx = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) * is sharing its VPE table with the current one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) its_send_vmovp(vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) its_vpe_db_proxy_move(vpe, from, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) irq_data_update_effective_affinity(d, cpumask_of(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) vpe_to_cpuid_unlock(vpe, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) return IRQ_SET_MASK_OK_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) }
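/*
 * Example of the v4.1 shortcut above (hypothetical topology): if CPUs
 * 0-3 share a single vPE table, moving a vPE from CPU 1 to CPU 2 only
 * updates vpe->col_idx; VMOVP and the doorbell move are needed only
 * when landing on a CPU whose RD uses a different vPE table.
 */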
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) static void its_wait_vpt_parse_complete(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) if (!gic_rdists->has_vpend_valid_dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) !(val & GICR_VPENDBASER_Dirty),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 10, 500));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) static void its_vpe_schedule(struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) /* Schedule the VPE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) GENMASK_ULL(51, 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) val |= GICR_VPROPBASER_RaWb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) val |= GICR_VPROPBASER_InnerShareable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) val = virt_to_phys(page_address(vpe->vpt_page)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) GENMASK_ULL(51, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) val |= GICR_VPENDBASER_RaWaWb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) val |= GICR_VPENDBASER_InnerShareable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) * There is no good way of finding out if the pending table is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) * empty as we can race against the doorbell interrupt very
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) * easily. So in the end, vpe->pending_last is only an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) * indication that the vcpu has something pending, not one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) * that the pending table is empty. A good implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) * would be able to read its coarse map pretty quickly anyway,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) * making this a tolerable issue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) val |= GICR_VPENDBASER_PendingLast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) val |= GICR_VPENDBASER_Valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) }
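/*
 * Shape of the two registers programmed above (illustrative):
 *
 *   GICR_VPROPBASER = (vprop PA & GENMASK(51, 12)) | IDbits
 *		     | RaWb | InnerShareable;
 *   GICR_VPENDBASER = (vpt PA & GENMASK(51, 16)) | RaWaWb
 *		     | InnerShareable | PendingLast | [IDAI] | Valid;
 *
 * Setting Valid is what actually makes the vPE resident on this RD.
 */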
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) static void its_vpe_deschedule(struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) val = its_clear_vpend_valid(vlpi_base, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) static void its_vpe_invall(struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) list_for_each_entry(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) if (!is_v4(its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) * Sending a VINVALL to a single ITS is enough, as all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) * we need is to reach the redistributors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) its_send_vinvall(its, vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) struct its_cmd_info *info = vcpu_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) switch (info->cmd_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) case SCHEDULE_VPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) its_vpe_schedule(vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) case DESCHEDULE_VPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) its_vpe_deschedule(vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) case COMMIT_VPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) its_wait_vpt_parse_complete();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) case INVALL_VPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) its_vpe_invall(vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) }
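/*
 * Typical (hypothetical) caller-side usage of the callback above, as
 * the GICv4 layer drives it through the irq core:
 *
 *   struct its_cmd_info info = { .cmd_type = SCHEDULE_VPE };
 *
 *   irq_set_vcpu_affinity(vpe->irq, &info);	// make the vPE resident
 *
 * DESCHEDULE_VPE, COMMIT_VPE and INVALL_VPE are issued the same way.
 */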
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) static void its_vpe_send_cmd(struct its_vpe *vpe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) void (*cmd)(struct its_device *, u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) its_vpe_db_proxy_map_locked(vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) static void its_vpe_send_inv(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) if (gic_rdists->has_direct_lpi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) void __iomem *rdbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) /* Target the redistributor this VPE is currently known on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) wait_for_syncr(rdbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) its_vpe_send_cmd(vpe, its_send_inv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) static void its_vpe_mask_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) {
	/*
	 * We need to mask the LPI, which is described by the parent
	 * irq_data. Instead of calling into the parent (which won't
	 * exactly do the right thing), let's simply use the
	 * parent_data pointer. Yes, I'm naughty.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) its_vpe_send_inv(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) static void its_vpe_unmask_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) /* Same hack as above... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) its_vpe_send_inv(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) static int its_vpe_set_irqchip_state(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) enum irqchip_irq_state which,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) bool state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) if (which != IRQCHIP_STATE_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) if (gic_rdists->has_direct_lpi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) void __iomem *rdbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) if (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) wait_for_syncr(rdbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) if (state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) its_vpe_send_cmd(vpe, its_send_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) its_vpe_send_cmd(vpe, its_send_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) static int its_vpe_retrigger(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) static struct irq_chip its_vpe_irq_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) .name = "GICv4-vpe",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) .irq_mask = its_vpe_mask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) .irq_unmask = its_vpe_unmask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) .irq_eoi = irq_chip_eoi_parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) .irq_set_affinity = its_vpe_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) .irq_retrigger = its_vpe_retrigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) .irq_set_irqchip_state = its_vpe_set_irqchip_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) static struct its_node *find_4_1_its(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) static struct its_node *its = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) if (!its) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) list_for_each_entry(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) if (is_v4_1(its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) return its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) /* No GICv4.1 ITS found: reset the stale list cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) its = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) return its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) static void its_vpe_4_1_send_inv(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) * GICv4.1 wants doorbells to be invalidated using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) * INVDB command in order to be broadcast to all RDs. Send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) * it to the first valid ITS, and let the HW do its magic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) its = find_4_1_its();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) if (its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) its_send_invdb(its, vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) static void its_vpe_4_1_mask_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) its_vpe_4_1_send_inv(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) static void its_vpe_4_1_unmask_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) its_vpe_4_1_send_inv(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) static void its_vpe_4_1_schedule(struct its_vpe *vpe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) struct its_cmd_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) u64 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) /* Schedule the VPE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) val |= GICR_VPENDBASER_Valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) }
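/*
 * FIELD_PREP() from <linux/bitfield.h> (already included here) shifts a
 * value into the position implied by the mask; FIELD_GET() is its
 * inverse. An illustrative round-trip over the VPEID field used above
 * (sketch only):
 *
 *	u64 v = FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
 *	u16 id = FIELD_GET(GICR_VPENDBASER_4_1_VPEID, v); // == vpe->vpe_id
 */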
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) struct its_cmd_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) if (info->req_db) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) * vPE is going to block: make the vPE non-resident with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) * PendingLast clear and DB set. The GIC guarantees that if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) * we read-back PendingLast clear, then a doorbell will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) * delivered when an interrupt comes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) * Note the locking: the doorbell interrupt handler can run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) * concurrently and update pending_last, so both the read-back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) * and the assignment below must happen under vpe_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) val = its_clear_vpend_valid(vlpi_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) GICR_VPENDBASER_PendingLast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) GICR_VPENDBASER_4_1_DB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) * We're not blocking, so just make the vPE non-resident
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) * with PendingLast set, indicating that we'll be back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) val = its_clear_vpend_valid(vlpi_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) GICR_VPENDBASER_PendingLast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) vpe->pending_last = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) }
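/*
 * Caller-side contract, as a sketch (this mirrors what the GICv4 glue
 * layer is expected to do; abort_blocking() is purely hypothetical):
 * when about to block, request a doorbell, then check pending_last to
 * see whether blocking is actually allowed:
 *
 *	struct its_cmd_info info = {
 *		.cmd_type = DESCHEDULE_VPE,
 *		.req_db   = true,
 *	};
 *
 *	irq_set_vcpu_affinity(vpe->irq, &info);
 *	if (vpe->pending_last)
 *		abort_blocking();	// interrupts pending, run the vPE
 */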
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) static void its_vpe_4_1_invall(struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) void __iomem *rdbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) val = GICR_INVALLR_V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) /* Target the redistributor this vPE is currently known on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) cpu = vpe_to_cpuid_lock(vpe, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) gic_write_lpir(val, rdbase + GICR_INVALLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) wait_for_syncr(rdbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) vpe_to_cpuid_unlock(vpe, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) struct its_cmd_info *info = vcpu_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) switch (info->cmd_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) case SCHEDULE_VPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) its_vpe_4_1_schedule(vpe, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) case DESCHEDULE_VPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) its_vpe_4_1_deschedule(vpe, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) case COMMIT_VPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) its_wait_vpt_parse_complete();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) case INVALL_VPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) its_vpe_4_1_invall(vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) static struct irq_chip its_vpe_4_1_irq_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) .name = "GICv4.1-vpe",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) .irq_mask = its_vpe_4_1_mask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) .irq_unmask = its_vpe_4_1_unmask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) .irq_eoi = irq_chip_eoi_parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) .irq_set_affinity = its_vpe_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) static void its_configure_sgi(struct irq_data *d, bool clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) struct its_cmd_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) desc.its_vsgi_cmd.vpe = vpe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) desc.its_vsgi_cmd.sgi = d->hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) desc.its_vsgi_cmd.clear = clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) * GICv4.1 allows us to send VSGI commands to any ITS as long as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) * destination VPE is mapped there. Since we map them eagerly at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) * activation time, we're pretty sure the first GICv4.1 ITS will do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) static void its_sgi_mask_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) vpe->sgi_config[d->hwirq].enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) its_configure_sgi(d, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) static void its_sgi_unmask_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) vpe->sgi_config[d->hwirq].enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) its_configure_sgi(d, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) static int its_sgi_set_affinity(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) const struct cpumask *mask_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) * There is no notion of affinity for virtual SGIs, at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) * not on the host (since they can only be targeting a vPE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) * Tell the kernel we've done whatever it asked for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) irq_data_update_effective_affinity(d, mask_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) return IRQ_SET_MASK_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) static int its_sgi_set_irqchip_state(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) enum irqchip_irq_state which,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) bool state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) if (which != IRQCHIP_STATE_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) if (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) struct its_node *its = find_4_1_its();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
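/*
 * sgir_base maps the 64kB register frame that lives SZ_128K into
 * the ITS (set up at probe time), so the architectural GITS_SGIR
 * offset has SZ_128K subtracted to land within that mapping.
 */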
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) its_configure_sgi(d, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) static int its_sgi_get_irqchip_state(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) enum irqchip_irq_state which, bool *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) u32 count = 1000000; /* 1s! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) if (which != IRQCHIP_STATE_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) * Locking galore! We can race against two different events:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) * - Concurrent vPE affinity change: we must make sure it cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) * happen, or we'll talk to the wrong redistributor. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) * identical to what happens with vLPIs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) * - Concurrent VSGIPENDR access: As it involves accessing two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) * MMIO registers, this must be made atomic one way or another.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) cpu = vpe_to_cpuid_lock(vpe, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) status = readl_relaxed(base + GICR_VSGIPENDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) if (!(status & GICR_VSGIPENDR_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) if (!count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) pr_err_ratelimited("Unable to get SGI status\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) } while (count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) vpe_to_cpuid_unlock(vpe, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) *val = !!(status & (1 << d->hwirq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) }
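/*
 * The open-coded poll above could equally be expressed with the helpers
 * from <linux/iopoll.h> (already included here). A minimal sketch,
 * assuming the same 1us interval and 1s budget; the _atomic variant is
 * needed because rd_lock is held:
 *
 *	ret = readl_relaxed_poll_timeout_atomic(base + GICR_VSGIPENDR,
 *						status,
 *						!(status & GICR_VSGIPENDR_BUSY),
 *						1, USEC_PER_SEC);
 */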
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) struct its_cmd_info *info = vcpu_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) switch (info->cmd_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) case PROP_UPDATE_VSGI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) vpe->sgi_config[d->hwirq].priority = info->priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) vpe->sgi_config[d->hwirq].group = info->group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) its_configure_sgi(d, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) static struct irq_chip its_sgi_irq_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) .name = "GICv4.1-sgi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) .irq_mask = its_sgi_mask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) .irq_unmask = its_sgi_unmask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) .irq_set_affinity = its_sgi_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) .irq_set_irqchip_state = its_sgi_set_irqchip_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) .irq_get_irqchip_state = its_sgi_get_irqchip_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) unsigned int virq, unsigned int nr_irqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) struct its_vpe *vpe = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) /* Yes, we do want 16 SGIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) WARN_ON(nr_irqs != 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) for (i = 0; i < 16; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) vpe->sgi_config[i].priority = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) vpe->sgi_config[i].enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) vpe->sgi_config[i].group = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) irq_domain_set_hwirq_and_chip(domain, virq + i, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) &its_sgi_irq_chip, vpe);
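/*
 * IRQ_DISABLE_UNLAZY makes irq_disable() mask the interrupt right
 * away instead of lazily; a lazily-disabled vSGI would otherwise keep
 * being delivered to the guest behind the kernel's back.
 */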
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) static void its_sgi_irq_domain_free(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) unsigned int nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) /* Nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) static int its_sgi_irq_domain_activate(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) struct irq_data *d, bool reserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) /* Write out the initial SGI configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) its_configure_sgi(d, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) * The VSGI command is awkward:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) * - To change the configuration, CLEAR must be set to false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) * leaving the pending bit unchanged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) * - To clear the pending bit, CLEAR must be set to true, leaving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) * the configuration unchanged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) * You just can't do both at once, hence the two commands below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) vpe->sgi_config[d->hwirq].enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) its_configure_sgi(d, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) its_configure_sgi(d, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) static const struct irq_domain_ops its_sgi_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) .alloc = its_sgi_irq_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) .free = its_sgi_irq_domain_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) .activate = its_sgi_irq_domain_activate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) .deactivate = its_sgi_irq_domain_deactivate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) static int its_vpe_id_alloc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) static void its_vpe_id_free(u16 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) ida_simple_remove(&its_vpeid_ida, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) }
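/*
 * ida_simple_get()/ida_simple_remove() are thin wrappers around the IDA
 * API; the same allocation could be written with the newer helpers
 * (sketch; note that ida_alloc_max()'s bound is inclusive where
 * ida_simple_get()'s end is exclusive):
 *
 *	return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL);
 *	...
 *	ida_free(&its_vpeid_ida, id);
 */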
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) static int its_vpe_init(struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) struct page *vpt_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) int vpe_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) /* Allocate vpe_id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) vpe_id = its_vpe_id_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) if (vpe_id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) return vpe_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) /* Allocate VPT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) vpt_page = its_allocate_pending_table(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) if (!vpt_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) its_vpe_id_free(vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) if (!its_alloc_vpe_table(vpe_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) its_vpe_id_free(vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) its_free_pending_table(vpt_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) raw_spin_lock_init(&vpe->vpe_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) vpe->vpe_id = vpe_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) vpe->vpt_page = vpt_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) if (gic_rdists->has_rvpeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) atomic_set(&vpe->vmapp_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) vpe->vpe_proxy_event = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) static void its_vpe_teardown(struct its_vpe *vpe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) its_vpe_db_proxy_unmap(vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) its_vpe_id_free(vpe->vpe_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) its_free_pending_table(vpe->vpt_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) static void its_vpe_irq_domain_free(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) unsigned int nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) struct its_vm *vm = domain->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) irq_domain_free_irqs_parent(domain, virq, nr_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) for (i = 0; i < nr_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) struct irq_data *data = irq_domain_get_irq_data(domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) virq + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) BUG_ON(vm != vpe->its_vm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) clear_bit(data->hwirq, vm->db_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) its_vpe_teardown(vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) irq_domain_reset_irq_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) its_free_prop_table(vm->vprop_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) unsigned int nr_irqs, void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) struct irq_chip *irqchip = &its_vpe_irq_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) struct its_vm *vm = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) unsigned long *bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) struct page *vprop_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) int base, nr_ids, i, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) BUG_ON(!vm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) if (!bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) if (nr_ids < nr_irqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) its_lpi_free(bitmap, base, nr_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) vprop_page = its_allocate_prop_table(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) if (!vprop_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) its_lpi_free(bitmap, base, nr_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) vm->db_bitmap = bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) vm->db_lpi_base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) vm->nr_db_lpis = nr_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) vm->vprop_page = vprop_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) if (gic_rdists->has_rvpeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) irqchip = &its_vpe_4_1_irq_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) for (i = 0; i < nr_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) vm->vpes[i]->vpe_db_lpi = base + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) err = its_vpe_init(vm->vpes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) err = its_irq_gic_domain_alloc(domain, virq + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) vm->vpes[i]->vpe_db_lpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) irq_domain_set_hwirq_and_chip(domain, virq + i, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) irqchip, vm->vpes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) set_bit(i, bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531)
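/*
 * On partial failure, tear down only the i vPEs that were fully set
 * up, then release the doorbell bitmap and the vPROPBASER page that
 * the vPEs would otherwise have shared.
 */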
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) if (i > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) its_vpe_irq_domain_free(domain, virq, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) its_lpi_free(bitmap, base, nr_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) its_free_prop_table(vprop_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) static int its_vpe_irq_domain_activate(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) struct irq_data *d, bool reserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) * If we use the list map, we issue VMAPP on demand... unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) * we're on GICv4.1, in which case we eagerly map the VPE on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) * all ITSs so that VSGIs can work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) if (!gic_requires_eager_mapping())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) /* Map the VPE to the first online CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) vpe->col_idx = cpumask_first(cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) list_for_each_entry(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) if (!is_v4(its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) its_send_vmapp(its, vpe, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) its_send_vinvall(its, vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) * If we use the list map on GICv4.0, we unmap the VPE once no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) * VLPIs are associated with the VM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) if (!gic_requires_eager_mapping())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) list_for_each_entry(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) if (!is_v4(its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) its_send_vmapp(its, vpe, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) static const struct irq_domain_ops its_vpe_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) .alloc = its_vpe_irq_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) .free = its_vpe_irq_domain_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) .activate = its_vpe_irq_domain_activate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) .deactivate = its_vpe_irq_domain_deactivate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) static int its_force_quiescent(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) u32 count = 1000000; /* 1s */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) val = readl_relaxed(base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) * The GIC architecture specification requires the ITS to be both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) * disabled and quiescent for writes to GITS_BASER<n> or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) * GITS_CBASER to not have UNPREDICTABLE results.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) /* Disable the generation of all interrupts to this ITS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) writel_relaxed(val, base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) /* Poll GITS_CTLR and wait until ITS becomes quiescent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) val = readl_relaxed(base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) if (val & GITS_CTLR_QUIESCENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) struct its_node *its = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) /* erratum 22375: only allocate an 8MB table size (20 device ID bits) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) its->typer &= ~GITS_TYPER_DEVBITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) struct its_node *its = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) struct its_node *its = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) /* On QDF2400, the size of the ITE is 16 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) struct its_node *its = its_dev->its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) * The Socionext Synquacer SoC has a so-called 'pre-ITS',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) * which maps 32-bit writes targeted at a separate window of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) * with device ID taken from bits [device_id_bits + 1:2] of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) * the window offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) return its->pre_its_base + (its_dev->device_id << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) }
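/*
 * Worked example (illustrative numbers only): with device_id_bits = 16
 * and pre_its_base = 0x58000000, device ID 0x1234 ends up writing at
 * 0x58000000 + (0x1234 << 2) = 0x580048d0, and the pre-ITS recovers
 * 0x1234 from bits [17:2] of the window offset.
 */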
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) struct its_node *its = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) u32 pre_its_window[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) u32 ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) if (!fwnode_property_read_u32_array(its->fwnode_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) "socionext,synquacer-pre-its",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) pre_its_window,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) ARRAY_SIZE(pre_its_window))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) its->pre_its_base = pre_its_window[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) its->get_msi_base = its_irq_get_msi_base_pre_its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) ids = ilog2(pre_its_window[1]) - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) if (device_ids(its) > ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) its->typer &= ~GITS_TYPER_DEVBITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) /* the pre-ITS breaks isolation, so disable MSI remapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) struct its_node *its = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) * Hip07 insists on using the wrong address for the VLPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) * page. Trick it into doing the right thing...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) its->vlpi_redist_offset = SZ_128K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) static const struct gic_quirk its_quirks[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) #ifdef CONFIG_CAVIUM_ERRATUM_22375
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) .desc = "ITS: Cavium errata 22375, 24313",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) .iidr = 0xa100034c, /* ThunderX pass 1.x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) .mask = 0xffff0fff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) .init = its_enable_quirk_cavium_22375,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) #ifdef CONFIG_CAVIUM_ERRATUM_23144
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) .desc = "ITS: Cavium erratum 23144",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) .iidr = 0xa100034c, /* ThunderX pass 1.x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) .mask = 0xffff0fff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) .init = its_enable_quirk_cavium_23144,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) .desc = "ITS: QDF2400 erratum 0065",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) .mask = 0xffffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) .init = its_enable_quirk_qdf2400_e0065,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) * The Socionext Synquacer SoC incorporates ARM's own GIC-500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) * implementation, but with a 'pre-ITS' added that requires
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) * special handling in software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) .desc = "ITS: Socionext Synquacer pre-ITS",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) .iidr = 0x0001143b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) .mask = 0xffffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) .init = its_enable_quirk_socionext_synquacer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) #ifdef CONFIG_HISILICON_ERRATUM_161600802
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) .desc = "ITS: Hip07 erratum 161600802",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) .iidr = 0x00000004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) .mask = 0xffffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) .init = its_enable_quirk_hip07_161600802,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) static void its_enable_quirks(struct its_node *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) u32 iidr = readl_relaxed(its->base + GITS_IIDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) gic_enable_quirks(iidr, its_quirks, its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) }
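/*
 * Matching sketch (see gic_enable_quirks() in irq-gic-common.c): an
 * entry fires when (iidr & entry->mask) == entry->iidr, so the
 * 0xffff0fff mask used by the ThunderX entries ignores the revision
 * field of GITS_IIDR and covers all pass 1.x parts.
 */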
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) static int its_save_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) raw_spin_lock(&its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) list_for_each_entry(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) base = its->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) its->ctlr_save = readl_relaxed(base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) err = its_force_quiescent(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) pr_err("ITS@%pa: failed to quiesce: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) &its->phys_base, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) writel_relaxed(its->ctlr_save, base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) err:
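/*
 * Unwind: walk back over the ITSes that were already quiesced and
 * restore their saved GITS_CTLR, leaving everything as it was before
 * the failed suspend attempt.
 */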
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) base = its->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) writel_relaxed(its->ctlr_save, base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) raw_spin_unlock(&its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) static void its_restore_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) raw_spin_lock(&its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) list_for_each_entry(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) base = its->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) * Make sure that the ITS is disabled. If it fails to quiesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) * don't restore it since writing to CBASER or BASER<n>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) * registers is UNPREDICTABLE according to the GICv3 ITS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) * Specification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) * Firmware resuming with the ITS enabled is terminally broken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) ret = its_force_quiescent(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) &its->phys_base, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) * Writing CBASER resets CREADR to 0, so make CWRITER and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) * cmd_write line up with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) its->cmd_write = its->cmd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) gits_write_cwriter(0, base + GITS_CWRITER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) /* Restore GITS_BASER from the value cache. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) for (i = 0; i < GITS_BASER_NR_REGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) struct its_baser *baser = &its->tables[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) if (!(baser->val & GITS_BASER_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) its_write_baser(its, baser, baser->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) writel_relaxed(its->ctlr_save, base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859)
		/*
		 * Reinit the collection if it's stored in the ITS: this
		 * is the case when its col_id is less than the HCC field
		 * (CID < HCC), as specified by the GICv3 architecture.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) if (its->collections[smp_processor_id()].col_id <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) its_cpu_init_collection(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) raw_spin_unlock(&its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871)
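/*
 * Syscore ops run on a single CPU with interrupts disabled, late in
 * system suspend and early in resume, once ordinary device interrupts
 * are already quiesced.
 */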
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) static struct syscore_ops its_syscore_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) .suspend = its_save_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) .resume = its_restore_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) struct irq_domain *inner_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) struct msi_domain_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) info = kzalloc(sizeof(*info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) if (!inner_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) kfree(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) inner_domain->parent = its_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) inner_domain->flags |= its->msi_domain_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) info->ops = &its_msi_domain_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) info->data = its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) inner_domain->host_data = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) static int its_init_vpe_domain(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) u32 devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) int entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907)
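	/*
	 * With DirectLPI, the redistributors can invalidate any LPI
	 * (including vPE doorbells) directly, so the proxy device
	 * machinery below is not needed.
	 */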
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) if (gic_rdists->has_direct_lpi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) pr_info("ITS: Using DirectLPI for VPE invalidation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) /* Any ITS will do, even if not v4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) its = list_first_entry(&its_nodes, struct its_node, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) entries = roundup_pow_of_two(nr_cpu_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) if (!vpe_proxy.vpes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) pr_err("ITS: Can't allocate GICv4 proxy device array\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923)
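	/*
	 * Without DirectLPI, doorbell invalidations are funnelled
	 * through a single "proxy" ITS device, into which vPEs are
	 * temporarily mapped (one event slot per possible CPU) whenever
	 * an invalidation is needed. The proxy claims the last possible
	 * DevID so that it cannot collide with a real device: with 16
	 * DevID bits, for example, devid = GENMASK(15, 0) = 0xffff.
	 */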
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) /* Use the last possible DevID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) devid = GENMASK(device_ids(its) - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) vpe_proxy.dev = its_create_device(its, devid, entries, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) if (!vpe_proxy.dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) kfree(vpe_proxy.vpes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) pr_err("ITS: Can't allocate GICv4 proxy device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) BUG_ON(entries > vpe_proxy.dev->nr_ites);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) raw_spin_lock_init(&vpe_proxy.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) vpe_proxy.next_victim = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) devid, vpe_proxy.dev->nr_ites);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) static int __init its_compute_its_list_map(struct resource *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) void __iomem *its_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) int its_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) u32 ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948)
	/*
	 * This is assumed to run early enough that we are guaranteed
	 * to be single-threaded, hence no locking. If that assumption
	 * ever changes, locking will have to be added here.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) if (its_number >= GICv4_ITS_LIST_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) pr_err("ITS@%pa: No ITSList entry available!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) &res->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961)
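	/*
	 * Program the allocated number into GITS_CTLR.ITSNumber and
	 * read it back: some implementations hardwire the field, in
	 * which case we adopt whatever value the ITS reports instead.
	 */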
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) ctlr = readl_relaxed(its_base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) ctlr &= ~GITS_CTLR_ITS_NUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) writel_relaxed(ctlr, its_base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) ctlr = readl_relaxed(its_base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) its_number = ctlr & GITS_CTLR_ITS_NUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) if (test_and_set_bit(its_number, &its_list_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) &res->start, its_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) return its_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) static int __init its_probe_one(struct resource *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) struct fwnode_handle *handle, int numa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) void __iomem *its_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) u32 val, ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) u64 baser, tmp, typer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) gfp_t gfp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) its_base = ioremap(res->start, SZ_64K);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) if (!its_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997)
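	/*
	 * GITS_PIDR2.ArchRev identifies the architecture revision:
	 * 0x30 is GICv3, 0x40 is GICv4. Anything else is not an ITS.
	 */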
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) if (val != 0x30 && val != 0x40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) goto out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) err = its_force_quiescent(its_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) goto out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) pr_info("ITS %pR\n", res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) its = kzalloc(sizeof(*its), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) if (!its) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) goto out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) raw_spin_lock_init(&its->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) mutex_init(&its->dev_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) INIT_LIST_HEAD(&its->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) INIT_LIST_HEAD(&its->its_device_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) typer = gic_read_typer(its_base + GITS_TYPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) its->typer = typer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) its->base = its_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) its->phys_base = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) if (is_v4(its)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) if (!(typer & GITS_TYPER_VMOVP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) err = its_compute_its_list_map(res, its_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) goto out_free_its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) its->list_nr = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) pr_info("ITS@%pa: Using ITS number %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) &res->start, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) if (is_v4_1(its)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) if (!its->sgir_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) goto out_free_its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) &res->start, its->mpidr, svpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) its->numa_node = numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058)
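	/*
	 * The ITS on RK3566/RK3568 cannot address memory above 4GB, so
	 * the command queue must be allocated from the DMA32 zone on
	 * those SoCs.
	 */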
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) gfp_flags = GFP_KERNEL | __GFP_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) gfp_flags |= GFP_DMA32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) page = alloc_pages_node(its->numa_node, gfp_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) get_order(ITS_CMD_QUEUE_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) goto out_unmap_sgir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) its->cmd_base = (void *)page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) its->cmd_write = its->cmd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) its->fwnode_handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) its->get_msi_base = its_irq_get_msi_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) its_enable_quirks(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) err = its_alloc_tables(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) goto out_free_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) err = its_alloc_collections(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) goto out_free_tables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083)
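	/*
	 * Program CBASER with the queue address and attributes. The low
	 * bits encode the size in 4K pages minus one: the 64K command
	 * queue used here encodes 64K/4K - 1 = 15, i.e. 16 pages.
	 */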
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) baser = (virt_to_phys(its->cmd_base) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) GITS_CBASER_RaWaWb |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) GITS_CBASER_InnerShareable |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) GITS_CBASER_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) gits_write_cbaser(baser, its->base + GITS_CBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) tmp = gits_read_cbaser(its->base + GITS_CBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092)
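	/*
	 * These Rockchip SoCs are not cache-coherent for ITS accesses.
	 * Masking the shareability bits from the read-back value makes
	 * the code below treat the queue as non-shareable and fall back
	 * to explicit cache flushing.
	 */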
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) if (IS_ENABLED(CONFIG_NO_GKI) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) (of_machine_is_compatible("rockchip,rk3568") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) of_machine_is_compatible("rockchip,rk3566") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) of_machine_is_compatible("rockchip,rk3588")))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) tmp &= ~GITS_CBASER_SHAREABILITY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) * The HW reports non-shareable, we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) * remove the cacheability attributes as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) * well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) GITS_CBASER_CACHEABILITY_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) baser |= GITS_CBASER_nC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) gits_write_cbaser(baser, its->base + GITS_CBASER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) pr_info("ITS: using cache flushing for cmd queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) gits_write_cwriter(0, its->base + GITS_CWRITER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) ctlr = readl_relaxed(its->base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) ctlr |= GITS_CTLR_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) if (is_v4(its))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) ctlr |= GITS_CTLR_ImDe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) writel_relaxed(ctlr, its->base + GITS_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) err = its_init_domain(handle, its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) goto out_free_tables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) raw_spin_lock(&its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) list_add(&its->entry, &its_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) raw_spin_unlock(&its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) out_free_tables:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) its_free_tables(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) out_free_cmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) out_unmap_sgir:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) if (its->sgir_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) iounmap(its->sgir_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) out_free_its:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) kfree(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) out_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) iounmap(its_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) static bool gic_rdists_supports_plpis(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) static int redist_disable_lpis(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) void __iomem *rbase = gic_data_rdist_rd_base();
	u64 timeout = USEC_PER_SEC;	/* polled in 1us steps below: ~1s */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) if (!gic_rdists_supports_plpis()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) val = readl_relaxed(rbase + GICR_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) if (!(val & GICR_CTLR_ENABLE_LPIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) * If coming via a CPU hotplug event, we don't need to disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) * LPIs before trying to re-enable them. They are already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) * configured and all is well in the world.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) * If running with preallocated tables, there is nothing to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) if (gic_data_rdist()->lpi_enabled ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177)
	/*
	 * From this point on, we only try to do some damage control.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) /* Disable LPIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) val &= ~GICR_CTLR_ENABLE_LPIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) writel_relaxed(val, rbase + GICR_CTLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) /* Make sure any change to GICR_CTLR is observable by the GIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) dsb(sy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) * Error out if we time out waiting for RWP to clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) if (!timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) pr_err("CPU%d: Timeout while disabling LPIs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) timeout--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206)
	/*
	 * After it has been written to 1, it is IMPLEMENTATION
	 * DEFINED whether GICR_CTLR.EnableLPIs becomes RES1 or can
	 * be cleared to 0. Error out if clearing the bit failed.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) int its_cpu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) if (!list_empty(&its_nodes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) ret = redist_disable_lpis();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) its_cpu_init_lpis();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) its_cpu_init_collections();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) static const struct of_device_id its_device_id[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) { .compatible = "arm,gic-v3-its", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) static int __init its_of_probe(struct device_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) for (np = of_find_matching_node(node, its_device_id); np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) np = of_find_matching_node(np, its_device_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) if (!of_device_is_available(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) continue;
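		/*
		 * The DT binding requires ITS nodes to carry the
		 * msi-controller property; a node without it cannot be
		 * used as an MSI parent, so skip it.
		 */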
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) if (!of_property_read_bool(np, "msi-controller")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) pr_warn("%pOF: no msi-controller property, ITS ignored\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) if (of_address_to_resource(np, 0, &res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) pr_warn("%pOF: no regs?\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) #ifdef CONFIG_ACPI_NUMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) struct its_srat_map {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) /* numa node id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) u32 numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) /* GIC ITS ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) u32 its_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) static struct its_srat_map *its_srat_maps __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) static int its_in_srat __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) static int __init acpi_get_its_numa_node(u32 its_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) for (i = 0; i < its_in_srat; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) if (its_id == its_srat_maps[i].its_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) return its_srat_maps[i].numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) return NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291)
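/*
 * SRAT is scanned twice: the match callback below merely lets
 * acpi_table_parse_entries() count the GIC ITS affinity entries, and
 * gic_acpi_parse_srat_its() then fills the array once it has been
 * sized and allocated.
 */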
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) const unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) const unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) int node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) struct acpi_srat_gic_its_affinity *its_affinity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) its_affinity = (struct acpi_srat_gic_its_affinity *)header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) if (!its_affinity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) if (its_affinity->header.length < sizeof(*its_affinity)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) pr_err("SRAT: Invalid header length %d in ITS affinity\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) its_affinity->header.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) * Note that in theory a new proximity node could be created by this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) * entry as it is an SRAT resource allocation structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) * We do not currently support doing so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) node = pxm_to_node(its_affinity->proximity_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) its_srat_maps[its_in_srat].numa_node = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) its_in_srat++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) its_affinity->proximity_domain, its_affinity->its_id, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) static void __init acpi_table_parse_srat_its(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) count = acpi_table_parse_entries(ACPI_SIG_SRAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) sizeof(struct acpi_table_srat),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) gic_acpi_match_srat_its, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) if (count <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) if (!its_srat_maps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) acpi_table_parse_entries(ACPI_SIG_SRAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) sizeof(struct acpi_table_srat),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) gic_acpi_parse_srat_its, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) /* free the its_srat_maps after ITS probing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) static void __init acpi_its_srat_maps_free(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) kfree(its_srat_maps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) static void __init acpi_table_parse_srat_its(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) static void __init acpi_its_srat_maps_free(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) const unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) struct acpi_madt_generic_translator *its_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) struct fwnode_handle *dom_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) its_entry = (struct acpi_madt_generic_translator *)header;
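	/*
	 * The MADT translator entry only carries a base address. The ITS
	 * register map is two 64K frames (control + translation), hence
	 * the fixed 128K window; the extra GICv4.1 SGI frame is mapped
	 * separately in its_probe_one().
	 */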
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) memset(&res, 0, sizeof(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) res.start = its_entry->base_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) res.flags = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) dom_handle = irq_domain_alloc_fwnode(&res.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) if (!dom_handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) &res.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) err = iort_register_domain_token(its_entry->translation_id, res.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) dom_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) &res.start, its_entry->translation_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) goto dom_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) err = its_probe_one(&res, dom_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) acpi_get_its_numa_node(its_entry->translation_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) iort_deregister_domain_token(its_entry->translation_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) dom_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) irq_domain_free_fwnode(dom_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) static void __init its_acpi_probe(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) acpi_table_parse_srat_its();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) gic_acpi_parse_madt_its, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) acpi_its_srat_maps_free();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) static void __init its_acpi_probe(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) struct irq_domain *parent_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) struct device_node *of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) struct its_node *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) bool has_v4 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) bool has_v4_1 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) gic_rdists = rdists;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) its_parent = parent_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) of_node = to_of_node(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) if (of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) its_of_probe(of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) its_acpi_probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) if (list_empty(&its_nodes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) pr_warn("ITS: No ITS available, not enabling LPIs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) err = allocate_lpi_tables();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) list_for_each_entry(its, &its_nodes, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) has_v4 |= is_v4(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) has_v4_1 |= is_v4_1(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) /* Don't bother with inconsistent systems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) rdists->has_rvpeid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456)
	if (has_v4 && rdists->has_vlpis) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) const struct irq_domain_ops *sgi_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459)
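		/*
		 * Only GICv4.1 implements architected vSGIs; plain GICv4
		 * has no SGI irqdomain to offer.
		 */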
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) if (has_v4_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) sgi_ops = &its_sgi_domain_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) sgi_ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) if (its_init_vpe_domain() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) rdists->has_vlpis = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) pr_err("ITS: Disabling GICv4 support\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) register_syscore_ops(&its_syscore_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) }