// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2019-2020 NVIDIA CORPORATION. All rights reserved.

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "arm-smmu.h"

/*
 * Tegra194 has three ARM MMU-500 instances.
 * Two of them are used together and must be programmed identically for
 * IOVA accesses interleaved across them; they translate accesses from
 * non-isochronous HW devices.
 * The third instance translates accesses from isochronous HW devices.
 * This implementation supports programming of the two instances that must
 * be programmed identically.
 * The third instance is used through the standard arm-smmu driver itself
 * and is out of scope of this implementation.
 */
#define NUM_SMMU_INSTANCES 2

struct nvidia_smmu {
	struct arm_smmu_device smmu;
	void __iomem *bases[NUM_SMMU_INSTANCES];
};

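/* Return the base of register page @page within SMMU instance @inst. */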
static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
					     unsigned int inst, int page)
{
	struct nvidia_smmu *nvidia_smmu;

	nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu);
	return nvidia_smmu->bases[inst] + (page << smmu->pgshift);
}

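/*
 * Register reads are satisfied from instance 0 only: both instances are
 * programmed identically, so their register state mirrors each other.
 */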
static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
				int page, int offset)
{
	void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;

	return readl_relaxed(reg);
}

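/* Mirror every register write to both instances to keep them in sync. */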
static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
				  int page, int offset, u32 val)
{
	unsigned int i;

	for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
		void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;

		writel_relaxed(val, reg);
	}
}

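/* 64-bit counterparts: read from instance 0, mirror writes to both. */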
static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
				  int page, int offset)
{
	void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;

	return readq_relaxed(reg);
}

static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
				    int page, int offset, u64 val)
{
	unsigned int i;

	for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
		void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;

		writeq_relaxed(val, reg);
	}
}

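/*
 * Issue the TLB sync command via arm_smmu_writel(), which dispatches through
 * the ->write_reg hook and therefore reaches both instances, then poll the
 * status register of each instance (with exponential backoff) until neither
 * reports an active sync.
 */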
static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				 int sync, int status)
{
	unsigned int delay;

	arm_smmu_writel(smmu, page, sync, 0);

	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		unsigned int spin_cnt;

		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			u32 val = 0;
			unsigned int i;

			for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
				void __iomem *reg;

				reg = nvidia_smmu_page(smmu, i, page) + status;
				val |= readl_relaxed(reg);
			}

			if (!(val & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;

			cpu_relax();
		}

		udelay(delay);
	}

	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

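/* Clear any stale global fault status in both instances at reset time. */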
static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
{
	unsigned int i;

	for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
		u32 val;
		void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
				    ARM_SMMU_GR0_sGFSR;

		/* clear global FSR */
		val = readl_relaxed(reg);
		writel_relaxed(val, reg);
	}

	return 0;
}

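/*
 * Report and clear a global fault on a single instance. Returns IRQ_NONE
 * if that instance has no fault pending.
 */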
static irqreturn_t nvidia_smmu_global_fault_inst(int irq,
						 struct arm_smmu_device *smmu,
						 int inst)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	void __iomem *gr0_base = nvidia_smmu_page(smmu, inst, 0);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	if (!gfsr)
		return IRQ_NONE;

	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	dev_err_ratelimited(smmu->dev,
			    "Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
			    "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
			    gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel_relaxed(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

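/*
 * The global fault interrupt line is shared by both instances, so check
 * each of them for a pending fault.
 */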
static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
{
	unsigned int inst;
	irqreturn_t ret = IRQ_NONE;
	struct arm_smmu_device *smmu = dev;

	for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
		irqreturn_t irq_ret;

		irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
		if (irq_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

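/*
 * Report and clear a context fault on one context bank of one instance,
 * using that instance's GR1 and context bank register pages.
 */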
static irqreturn_t nvidia_smmu_context_fault_bank(int irq,
						  struct arm_smmu_device *smmu,
						  int idx, int inst)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	void __iomem *gr1_base = nvidia_smmu_page(smmu, inst, 1);
	void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);

	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
			    "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

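/*
 * The dev cookie is the iommu_domain this handler was registered with;
 * derive the owning SMMU from it and scan both instances for faults.
 */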
static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
{
	int idx;
	unsigned int inst;
	irqreturn_t ret = IRQ_NONE;
	struct arm_smmu_device *smmu;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain;

	smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
	smmu = smmu_domain->smmu;

	for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
		irqreturn_t irq_ret;

		/*
		 * Interrupt line is shared between all contexts.
		 * Check for faults across all contexts.
		 */
		for (idx = 0; idx < smmu->num_context_banks; idx++) {
			irq_ret = nvidia_smmu_context_fault_bank(irq, smmu,
								 idx, inst);
			if (irq_ret == IRQ_HANDLED)
				ret = IRQ_HANDLED;
		}
	}

	return ret;
}

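/*
 * Implementation hooks that override the default register accessors, reset,
 * TLB sync and fault handling of the core arm-smmu driver so that both
 * instances are kept programmed identically.
 */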
static const struct arm_smmu_impl nvidia_smmu_impl = {
	.read_reg = nvidia_smmu_read_reg,
	.write_reg = nvidia_smmu_write_reg,
	.read_reg64 = nvidia_smmu_read_reg64,
	.write_reg64 = nvidia_smmu_write_reg64,
	.reset = nvidia_smmu_reset,
	.tlb_sync = nvidia_smmu_tlb_sync,
	.global_fault = nvidia_smmu_global_fault,
	.context_fault = nvidia_smmu_context_fault,
};

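/*
 * Wrap the arm_smmu_device allocated by the core driver in a nvidia_smmu,
 * map the second instance's registers and install the NVIDIA-specific
 * implementation hooks.
 */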
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct resource *res;
	struct device *dev = smmu->dev;
	struct nvidia_smmu *nvidia_smmu;
	struct platform_device *pdev = to_platform_device(dev);

	nvidia_smmu = devm_kzalloc(dev, sizeof(*nvidia_smmu), GFP_KERNEL);
	if (!nvidia_smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * Copy the data from the struct arm_smmu_device *smmu allocated in
	 * arm-smmu.c. The smmu embedded in struct nvidia_smmu replaces the
	 * smmu pointer used in arm-smmu.c once this function returns.
	 * This is necessary to derive nvidia_smmu from the smmu pointer
	 * passed to the arm_smmu_impl function calls subsequently.
	 */
	nvidia_smmu->smmu = *smmu;
	/* Instance 0 is ioremapped by arm-smmu.c. */
	nvidia_smmu->bases[0] = smmu->base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return ERR_PTR(-ENODEV);

	nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res);
	if (IS_ERR(nvidia_smmu->bases[1]))
		return ERR_CAST(nvidia_smmu->bases[1]);

	nvidia_smmu->smmu.impl = &nvidia_smmu_impl;

	/*
	 * Free the struct arm_smmu_device *smmu allocated in arm-smmu.c.
	 * Once this function returns, arm-smmu.c will use the arm_smmu_device
	 * embedded in struct nvidia_smmu instead.
	 */
	devm_kfree(dev, smmu);

	return &nvidia_smmu->smmu;
}