// SPDX-License-Identifier: GPL-2.0-only
// Miscellaneous Arm SMMU implementation and integration quirks
// Copyright (C) 2019 Arm Limited

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/bitfield.h>
#include <linux/of.h>

#include "arm-smmu.h"

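/*
 * Systems with the "calxeda,smmu-secure-config-access" property have to
 * drive the SMMU through the Non-secure aliases of the Secure-banked
 * global registers, which live 0x400 bytes further into the GR0 page.
 * Redirect any 's'-prefixed GR0 offset to its Non-secure counterpart.
 */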
static int arm_smmu_gr0_ns(int offset)
{
	switch (offset) {
	case ARM_SMMU_GR0_sCR0:
	case ARM_SMMU_GR0_sACR:
	case ARM_SMMU_GR0_sGFSR:
	case ARM_SMMU_GR0_sGFSYNR0:
	case ARM_SMMU_GR0_sGFSYNR1:
	case ARM_SMMU_GR0_sGFSYNR2:
		return offset + 0x400;
	default:
		return offset;
	}
}

static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
			    int offset)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
			      int offset, u32 val)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

/* Since we don't care for sGFAR, we can do without 64-bit accessors */
static const struct arm_smmu_impl calxeda_impl = {
	.read_reg = arm_smmu_read_ns,
	.write_reg = arm_smmu_write_ns,
};

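/*
 * Cavium per-instance data: embed the core arm_smmu_device so the hooks
 * below can recover it with container_of(), alongside a per-SMMU base
 * offset used to keep ASID/VMID allocation unique across the system.
 */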
struct cavium_smmu {
	struct arm_smmu_device smmu;
	u32 id_base;
};

static int cavium_cfg_probe(struct arm_smmu_device *smmu)
{
	static atomic_t context_count = ATOMIC_INIT(0);
	struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
	dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");

	return 0;
}

static int cavium_init_context(struct arm_smmu_domain *smmu_domain,
			       struct io_pgtable_cfg *pgtbl_cfg,
			       struct device *dev)
{
	struct cavium_smmu *cs = container_of(smmu_domain->smmu,
					      struct cavium_smmu, smmu);

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		smmu_domain->cfg.vmid += cs->id_base;
	else
		smmu_domain->cfg.asid += cs->id_base;

	return 0;
}

static const struct arm_smmu_impl cavium_impl = {
	.cfg_probe = cavium_cfg_probe,
	.init_context = cavium_init_context,
};

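/*
 * Swap the probed arm_smmu_device for one embedded in the larger Cavium
 * structure: copy the existing state across, install the Cavium hooks,
 * then free the original allocation.
 */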
static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct cavium_smmu *cs;

	cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	cs->smmu = *smmu;
	cs->smmu.impl = &cavium_impl;

	devm_kfree(smmu->dev, smmu);

	return &cs->smmu;
}

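/*
 * MMU-500 Auxiliary Control Register bits: ACTLR is banked per context
 * bank, while the ACR bits live in the global sACR register.
 */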
#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
	u32 reg, major;
	int i;
	/*
	 * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
	 * writes to the context bank ACTLRs will stick. And we just hope that
	 * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
	 */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
	major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
	if (major >= 2)
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
	/*
	 * Allow unmatched Stream IDs to allocate bypass
	 * TLB entries for reduced latency.
	 */
	reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);

	/*
	 * Disable MMU-500's not-particularly-beneficial next-page
	 * prefetcher for the sake of errata #841119 and #826419.
	 */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
		reg &= ~ARM_MMU500_ACTLR_CPRE;
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
	}

	return 0;
}

static const struct arm_smmu_impl arm_mmu500_impl = {
	.reset = arm_mmu500_reset,
};

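/*
 * Marvell Armada AP806 integration: erratum #582743 means 64-bit MMIO
 * accesses have to be split into 32-bit halves, and the AArch64 pagetable
 * formats are hidden (see mrvl_mmu500_cfg_probe()) so that 32-bit accesses
 * suffice on the interconnect.
 */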
static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split all readq accesses into a pair of readl accesses.
	 */
	return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
}

static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
			       u64 val)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split all writeq accesses into a pair of writel accesses.
	 */
	hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
}

static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
{
	/*
	 * Armada-AP806 erratum #582743.
	 * Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64
	 * formats altogether and allow using 32-bit accesses on the
	 * interconnect.
	 */
	smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
			    ARM_SMMU_FEAT_FMT_AARCH64_16K |
			    ARM_SMMU_FEAT_FMT_AARCH64_64K);

	return 0;
}

static const struct arm_smmu_impl mrvl_mmu500_impl = {
	.read_reg64 = mrvl_mmu500_readq,
	.write_reg64 = mrvl_mmu500_writeq,
	.cfg_probe = mrvl_mmu500_cfg_probe,
	.reset = arm_mmu500_reset,
};


struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	/*
	 * Set the impl for model-specific implementation quirks first,
	 * such that platform integration quirks can pick it up and
	 * inherit from it if necessary.
	 */
	switch (smmu->model) {
	case ARM_MMU500:
		smmu->impl = &arm_mmu500_impl;
		break;
	case CAVIUM_SMMUV2:
		return cavium_smmu_impl_init(smmu);
	default:
		break;
	}

	/* This is implicitly MMU-400 */
	if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
		smmu->impl = &calxeda_impl;

	if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
		return nvidia_smmu_impl_init(smmu);

	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
	    of_device_is_compatible(np, "qcom,sc7180-smmu-500") ||
	    of_device_is_compatible(np, "qcom,sm8150-smmu-500") ||
	    of_device_is_compatible(np, "qcom,sm8250-smmu-500"))
		return qcom_smmu_impl_init(smmu);

	if (of_device_is_compatible(np, "qcom,adreno-smmu"))
		return qcom_adreno_smmu_impl_init(smmu);

	if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
		smmu->impl = &mrvl_mmu500_impl;

	return smmu;
}