// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

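/*
 * Reset value for unconfigured Stream-to-Context Register entries: fault
 * incoming transactions when bypass is disabled, otherwise let them pass
 * through the SMMU untranslated.
 */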
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

static bool using_legacy_binding, using_generic_binding;

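/*
 * Runtime PM helpers: these only touch the device's runtime PM state when it
 * has actually been enabled (i.e. the SMMU has clocks or power domains to
 * manage); otherwise they are no-ops, so register accesses stay safe on
 * always-on implementations.
 */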
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_resume_and_get(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put_autosuspend(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
static int arm_smmu_bus_init(struct iommu_ops *ops);

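/*
 * With the legacy "mmu-masters" binding, the OF node of interest for a PCI
 * device is the one the SMMU's phandles actually point at, i.e. the host
 * bridge's parent, so walk up to the root bus; platform devices simply use
 * their own node.
 */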
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

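/*
 * Called via driver_for_each_device() for every probed SMMU. On entry, *data
 * points at an of_phandle_iterator whose ->node holds the master's OF node;
 * if any "mmu-masters" phandle of this SMMU matches it, stash the SMMU's
 * struct device back through *data and return 1 to stop the iteration, with
 * the iterator left positioned on the matching entry.
 */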
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", -1)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

/*
 * With the legacy DT binding in play, we have no guarantees about
 * probe order, but then we're also not doing default domains, so we can
 * delay setting bus ops until we're sure every possible SMMU is ready,
 * and that way ensure that no probe_device() calls get missed.
 */
static int arm_smmu_legacy_bus_init(void)
{
	if (using_legacy_binding)
		return arm_smmu_bus_init(&arm_smmu_ops);
	return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);
#else
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	return -ENODEV;
}
#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
		return smmu->impl->tlb_sync(smmu, page, sync, status);

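	/*
	 * Kick off the sync by writing the sync register (using a dummy
	 * non-zero value, to keep the Qualcomm hypervisor workaround above
	 * happy), then poll the status register: spin briefly in case the
	 * sync completes quickly, and back off with exponentially growing
	 * delays before declaring a timeout.
	 */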
	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(smmu_domain);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

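/*
 * Invalidate a range of stage-1 TLB entries by VA. For the AArch32 context
 * formats the invalidation value is the page-aligned IOVA with the ASID
 * folded into the low bits; for AArch64 contexts the IOVA is expressed in
 * 4K units with the ASID in bits [63:48], hence the 64-bit register write.
 */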
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

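/*
 * Stage-2 range invalidation is by IPA, again in 4K units; no ASID or VMID
 * needs encoding in the value, since the TLBIIPAS2* operations already apply
 * to the VMID of the context bank being written.
 */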
static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVA);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
}

static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
}

static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
					size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_context_s2(cookie);
}
/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
 * think.
 */
static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
					unsigned long iova, size_t granule,
					void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s1,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2_v1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
};

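/*
 * Per-context fault handler: read the fault status and syndrome registers
 * for this context bank, report the fault (rate-limited), then write FSR
 * back to itself to clear the recorded fault.
 */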
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	if (__ratelimit(&rs)) {
		if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
		    (gfsr & ARM_SMMU_sGFSR_USF))
			dev_err(smmu->dev,
				"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
				(u16)gfsynr1);
		else
			dev_err(smmu->dev,
				"Unexpected global fault, this could be serious\n");
		dev_err(smmu->dev,
			"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
			gfsr, gfsynr0, gfsynr1, gfsynr2);
	}

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}

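/*
 * Fill in the software shadow of a context bank (TCR/TTBR/MAIR values) from
 * the io-pgtable configuration; nothing is written to hardware here, that
 * happens later in arm_smmu_write_context_bank().
 */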
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
			cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= ARM_SMMU_TCR2_AS;
			else
				cb->tcr[0] |= ARM_SMMU_TCR_EAE;
		}
	} else {
		cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
			cb->ttbr[1] = 0;
		} else {
			cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
						 cfg->asid);
			cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
						 cfg->asid);

			if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
				cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
			else
				cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
		}
	}
}

void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = ARM_SMMU_CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
				  ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
		       FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
				  ARM_SMMU_CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
	      ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
	if (stage1)
		reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= ARM_SMMU_SCTLR_E;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

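/*
 * Pick a context bank index: let the implementation hook choose one if it is
 * provided, otherwise take the first free bit in the context map at or above
 * 'start'.
 */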
static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct arm_smmu_device *smmu,
				       struct device *dev, unsigned int start)
{
	if (smmu->impl && smmu->impl->alloc_context_bank)
		return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);

	return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu,
					struct device *dev)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	irqreturn_t (*context_fault)(int irq, void *dev);

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested	Supported	Actual
	 * S1		N		S1
	 * S1		S1+S2		S1
	 * S1		S2		S2
	 * S1		S1		S1
	 * N		N		N
	 * N		S1+S2		S2
	 * N		S2		S2
	 * N		S1		S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
	if (ret < 0) {
		goto out_unlock;
	}

	smmu_domain->smmu = smmu;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

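	/*
	 * Derive the context tag from the bank index: stage-2 domains use
	 * cbndx + 1 as their VMID (presumably to keep VMID 0 unused), while
	 * stage-1 domains simply reuse the bank index as their ASID.
	 */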
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;
	else
		cfg->asid = cfg->cbndx;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->flush_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
		if (ret)
			goto out_clear_smmu;
	}

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	if (pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
		domain->geometry.aperture_start = ~0UL << ias;
		domain->geometry.aperture_end = ~0UL;
	} else {
		domain->geometry.aperture_end = (1UL << ias) - 1;
	}

	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];

	if (smmu->impl && smmu->impl->context_fault)
		context_fault = smmu->impl->context_fault;
	else
		context_fault = arm_smmu_context_fault;

	ret = devm_request_irq(smmu->dev, irq, context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) int ret, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ret = arm_smmu_rpm_get(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * Disable the context bank and free the page tables before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * releasing the bank itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) smmu->cbs[cfg->cbndx].cfg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) arm_smmu_write_context_bank(smmu, cfg->cbndx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) devm_free_irq(smmu->dev, irq, domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) free_io_pgtable_ops(smmu_domain->pgtbl_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) arm_smmu_rpm_put(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct arm_smmu_domain *smmu_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (type != IOMMU_DOMAIN_UNMANAGED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) type != IOMMU_DOMAIN_DMA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) type != IOMMU_DOMAIN_IDENTITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * Allocate the domain and initialise some of its data structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * We can't really do anything meaningful until we've added a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * master.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (!smmu_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) iommu_get_dma_cookie(&smmu_domain->domain))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) kfree(smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) mutex_init(&smmu_domain->init_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) spin_lock_init(&smmu_domain->cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return &smmu_domain->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static void arm_smmu_domain_free(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * Free the domain resources. We assume that all devices have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * already been detached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) iommu_put_dma_cookie(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) arm_smmu_destroy_domain_context(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) kfree(smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct arm_smmu_smr *smr = smmu->smrs + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
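/*
 * With extended stream IDs the valid bit lives in S2CR (EXIDVALID), so
 * only set SMR.VALID when EXIDS is not in use.
 */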
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) reg |= ARM_SMMU_SMR_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (smmu->impl && smmu->impl->write_s2cr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) smmu->impl->write_s2cr(smmu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) smmu->smrs[idx].valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) reg |= ARM_SMMU_S2CR_EXIDVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) arm_smmu_write_s2cr(smmu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (smmu->smrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) arm_smmu_write_smr(smmu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * should be called after sCR0 is written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) u32 smr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (!smmu->smrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * If we've had to accommodate firmware memory regions, we may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * have live SMRs by now; tread carefully...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * Somewhat perversely, not having a free SMR for this test implies we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * can get away without it anyway, as we'll only be able to 'allocate'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * these SMRs for the ID/mask values we're already trusting to be OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) for (i = 0; i < smmu->num_mapping_groups; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (!smmu->smrs[i].valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) goto smr_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) smr_ok:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * SMR.ID bits may not be preserved if the corresponding MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * bits are set, so check each one separately. We can reject
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * masters later if they try to claim IDs outside these masks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct arm_smmu_smr *smrs = smmu->smrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) int i, free_idx = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /* Stream indexing is blissfully easy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (!smrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* Validating SMRs is... less so */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) for (i = 0; i < smmu->num_mapping_groups; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (!smrs[i].valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * Note the first free entry we come across, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * we'll claim in the end if nothing else matches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (free_idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) free_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * If the new entry is _entirely_ matched by an existing entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * then reuse that, with the guarantee that there also cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * be any subsequent conflicting entries. In normal use we'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * expect simply identical entries for this case, but there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * no harm in accommodating the generalisation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if ((mask & smrs[i].mask) == mask &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) !((id ^ smrs[i].id) & ~smrs[i].mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * If the new entry has any other overlap with an existing one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * though, then there always exists at least one stream ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * which would cause a conflict, and we can't allow that risk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) return free_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) bool pinned = smmu->s2crs[idx].pinned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) u8 cbndx = smmu->s2crs[idx].cbndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
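/* Only tear the entry down once its last user has gone. */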
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (--smmu->s2crs[idx].count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) smmu->s2crs[idx] = s2cr_init_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (pinned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) smmu->s2crs[idx].pinned = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) smmu->s2crs[idx].cbndx = cbndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) } else if (smmu->smrs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) smmu->smrs[idx].valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static int arm_smmu_master_alloc_smes(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct arm_smmu_device *smmu = cfg->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct arm_smmu_smr *smrs = smmu->smrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) int i, idx, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) mutex_lock(&smmu->stream_map_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* Figure out a viable stream map entry allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) for_each_cfg_sme(cfg, fwspec, i, idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (idx != INVALID_SMENDX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) ret = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) ret = arm_smmu_find_sme(smmu, sid, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) idx = ret;
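/*
 * Only the first user of an entry programs the SMR;
 * arm_smmu_find_sme() guarantees that any later user already
 * matches the installed ID/mask.
 */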
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (smrs && smmu->s2crs[idx].count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) smrs[idx].id = sid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) smrs[idx].mask = mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) smrs[idx].valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) smmu->s2crs[idx].count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) cfg->smendx[i] = (s16)idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* It worked! Now, poke the actual hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) for_each_cfg_sme(cfg, fwspec, i, idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) arm_smmu_write_sme(smmu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) mutex_unlock(&smmu->stream_map_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) arm_smmu_free_sme(smmu, cfg->smendx[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) cfg->smendx[i] = INVALID_SMENDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) mutex_unlock(&smmu->stream_map_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct iommu_fwspec *fwspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) struct arm_smmu_device *smmu = cfg->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) int i, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) mutex_lock(&smmu->stream_map_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) for_each_cfg_sme(cfg, fwspec, i, idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (arm_smmu_free_sme(smmu, idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) arm_smmu_write_sme(smmu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) cfg->smendx[i] = INVALID_SMENDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) mutex_unlock(&smmu->stream_map_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) struct arm_smmu_master_cfg *cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct iommu_fwspec *fwspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct arm_smmu_s2cr *s2cr = smmu->s2crs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) u8 cbndx = smmu_domain->cfg.cbndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) enum arm_smmu_s2cr_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) int i, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) type = S2CR_TYPE_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) type = S2CR_TYPE_TRANS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
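/*
 * Point each of the master's stream map entries at the domain's context
 * bank, leaving entries that already match untouched.
 */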
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) for_each_cfg_sme(cfg, fwspec, i, idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /* Don't bypass pinned streams; leave them as they are */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (type == S2CR_TYPE_BYPASS && s2cr[idx].pinned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) s2cr[idx].type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) s2cr[idx].cbndx = cbndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) arm_smmu_write_s2cr(smmu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct arm_smmu_master_cfg *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) struct arm_smmu_device *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!fwspec || fwspec->ops != &arm_smmu_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * FIXME: The arch/arm DMA API code tries to attach devices to its own
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * domains between of_xlate() and probe_device() - we have no way to cope
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * with that, so until ARM gets converted to rely on groups and default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * domains, just say no (but more politely than by dereferencing NULL).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * This should be at least a WARN_ON once that's sorted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) cfg = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (!cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) smmu = cfg->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) ret = arm_smmu_rpm_get(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) /* Ensure that the domain is finalised */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) ret = arm_smmu_init_domain_context(domain, smmu, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) goto rpm_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * Sanity check the domain. We don't support domains across
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * different SMMUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (smmu_domain->smmu != smmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) goto rpm_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /* Looks ok, so add the device to the domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * Set up an autosuspend delay to avoid bouncing runpm state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * Otherwise, if a driver for a suspended consumer device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * unmaps buffers, it will runpm resume/suspend for each one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * For example, when used by a GPU device, when an application
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * or game exits, it can trigger unmapping 100s or 1000s of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * buffers. With a runpm cycle for each buffer, that adds up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * to 5-10sec worth of reprogramming the context bank, while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * the system appears to be locked up to the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) pm_runtime_set_autosuspend_delay(smmu->dev, 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) pm_runtime_use_autosuspend(smmu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) rpm_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) arm_smmu_rpm_put(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) phys_addr_t paddr, size_t pgsize, size_t pgcount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) int prot, gfp_t gfp, size_t *mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) arm_smmu_rpm_get(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) arm_smmu_rpm_put(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) size_t pgsize, size_t pgcount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) struct iommu_iotlb_gather *iotlb_gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) size_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) arm_smmu_rpm_get(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) arm_smmu_rpm_put(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (smmu_domain->flush_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) arm_smmu_rpm_get(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) arm_smmu_rpm_put(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct iommu_iotlb_gather *gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (!smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
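/*
 * A context-scoped sync suffices on SMMUv2, or for stage-1 contexts on
 * any version; otherwise fall back to a global sync.
 */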
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) arm_smmu_rpm_get(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (smmu->version == ARM_SMMU_V2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) arm_smmu_tlb_sync_context(smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) arm_smmu_tlb_sync_global(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) arm_smmu_rpm_put(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct device *dev = smmu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) void __iomem *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) u64 phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) unsigned long va, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) int ret, idx = cfg->cbndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) phys_addr_t addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) ret = arm_smmu_rpm_get(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
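/*
 * Ask the hardware to translate the IOVA: write the page-aligned address
 * to ATS1PR, poll ATSR until the walk completes, then read the result
 * back from PAR.
 */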
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) spin_lock_irqsave(&smmu_domain->cb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) va = iova & ~0xfffUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 5, 50)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) "iova to phys timed out on %pad. Falling back to software table walk.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) &iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) arm_smmu_rpm_put(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return ops->iova_to_phys(ops, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (phys & ARM_SMMU_CB_PAR_F) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) dev_err(dev, "translation fault!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) dev_err(dev, "PAR = 0x%llx\n", phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) addr = (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) arm_smmu_rpm_put(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (domain->type == IOMMU_DOMAIN_IDENTITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
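/*
 * Use the hardware translation registers for stage-1 domains when the
 * SMMU implements them; otherwise walk the page tables in software.
 */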
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) return arm_smmu_iova_to_phys_hard(domain, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return ops->iova_to_phys(ops, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static bool arm_smmu_capable(enum iommu_cap cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) switch (cap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) case IOMMU_CAP_CACHE_COHERENCY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * Return true here as the SMMU can always send out coherent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) case IOMMU_CAP_NOEXEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return dev ? dev_get_drvdata(dev) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) static struct iommu_device *arm_smmu_probe_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct arm_smmu_device *smmu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct arm_smmu_master_cfg *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (using_legacy_binding) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ret = arm_smmu_register_legacy_master(dev, &smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * will allocate/initialise a new one. Thus we need to update fwspec for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * later use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) ret = -EINVAL;
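/*
 * Check that every stream ID and mask fits within what this SMMU can
 * actually match on.
 */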
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) for (i = 0; i < fwspec->num_ids; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (sid & ~smmu->streamid_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) sid, smmu->streamid_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (mask & ~smmu->smr_mask_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) mask, smmu->smr_mask_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) ret = -ENOMEM;
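/*
 * The validation loop above leaves i == fwspec->num_ids, so size the
 * trailing smendx[] array with one slot per stream ID.
 */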
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (!cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) cfg->smmu = smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) dev_iommu_priv_set(dev, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) while (i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) cfg->smendx[i] = INVALID_SMENDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) ret = arm_smmu_rpm_get(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) goto out_cfg_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) ret = arm_smmu_master_alloc_smes(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) arm_smmu_rpm_put(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) goto out_cfg_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) device_link_add(dev, smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return &smmu->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) out_cfg_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) kfree(cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) iommu_fwspec_free(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) static void arm_smmu_release_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) struct arm_smmu_master_cfg *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) struct arm_smmu_device *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (!fwspec || fwspec->ops != &arm_smmu_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) cfg = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) smmu = cfg->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) ret = arm_smmu_rpm_get(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) arm_smmu_master_free_smes(cfg, fwspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) arm_smmu_rpm_put(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) dev_iommu_priv_set(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) kfree(cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) iommu_fwspec_free(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static struct iommu_group *arm_smmu_device_group(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) struct arm_smmu_device *smmu = cfg->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct iommu_group *group = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) int i, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
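/*
 * Every stream map entry used by this master must either have no group
 * yet or agree on the same one; conflicting groups cannot be represented.
 */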
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) for_each_cfg_sme(cfg, fwspec, i, idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (group && smmu->s2crs[idx].group &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) group != smmu->s2crs[idx].group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) group = smmu->s2crs[idx].group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return iommu_group_ref_get(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (dev_is_pci(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) group = pci_device_group(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) else if (dev_is_fsl_mc(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) group = fsl_mc_device_group(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) group = generic_device_group(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /* Remember group for faster lookups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (!IS_ERR(group))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) for_each_cfg_sme(cfg, fwspec, i, idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) smmu->s2crs[idx].group = group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) enum iommu_attr attr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) switch (domain->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) case IOMMU_DOMAIN_UNMANAGED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) switch (attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) case DOMAIN_ATTR_NESTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) case IOMMU_DOMAIN_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) switch (attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) *(int *)data = smmu_domain->non_strict;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) enum iommu_attr attr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) mutex_lock(&smmu_domain->init_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) switch (domain->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) case IOMMU_DOMAIN_UNMANAGED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) switch (attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) case DOMAIN_ATTR_NESTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (smmu_domain->smmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (*(int *)data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) case IOMMU_DOMAIN_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) switch (attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) smmu_domain->non_strict = *(int *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) mutex_unlock(&smmu_domain->init_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) u32 mask, fwid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
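/*
 * Pack the stream ID (first cell) and optional mask (second cell, or the
 * "stream-match-mask" property) into a single fwspec ID.
 */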
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (args->args_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (args->args_count > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) return iommu_fwspec_add_ids(dev, &fwid, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
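/*
 * Reserved regions reported to the core: a software-managed MSI window
 * (MSI_IOVA_BASE/MSI_IOVA_LENGTH) for mapping MSI doorbells, plus whatever
 * iommu_dma_get_resv_regions() derives from the firmware description.
 */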
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) static void arm_smmu_get_resv_regions(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) struct iommu_resv_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) prot, IOMMU_RESV_SW_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (!region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	list_add_tail(&region->list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) iommu_dma_get_resv_regions(dev, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
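/*
 * Let implementation-specific quirks dictate the default domain type for a
 * master; returning 0 leaves the choice to the IOMMU core.
 */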
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static int arm_smmu_def_domain_type(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) const struct arm_smmu_impl *impl = cfg->smmu->impl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (impl && impl->def_domain_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) return impl->def_domain_type(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) static struct iommu_ops arm_smmu_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) .capable = arm_smmu_capable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) .domain_alloc = arm_smmu_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) .domain_free = arm_smmu_domain_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) .attach_dev = arm_smmu_attach_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) .map_pages = arm_smmu_map_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) .unmap_pages = arm_smmu_unmap_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) .flush_iotlb_all = arm_smmu_flush_iotlb_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) .iotlb_sync = arm_smmu_iotlb_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) .iova_to_phys = arm_smmu_iova_to_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) .probe_device = arm_smmu_probe_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) .release_device = arm_smmu_release_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) .device_group = arm_smmu_device_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) .domain_get_attr = arm_smmu_domain_get_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) .domain_set_attr = arm_smmu_domain_set_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) .of_xlate = arm_smmu_of_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) .get_resv_regions = arm_smmu_get_resv_regions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) .put_resv_regions = generic_iommu_put_resv_regions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) .def_domain_type = arm_smmu_def_domain_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) .pgsize_bitmap = -1UL, /* Restricted during device attach */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
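/*
 * Bring the SMMU to a known state: clear the global fault status, reset the
 * stream mapping and context bank registers, invalidate the TLBs, and only
 * then write sCR0 to enable client access with the configured fault and
 * bypass behaviour.
 */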
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) /* clear global FSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * Reset stream mapping groups: Initial values mark all SMRn as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) * invalid and all S2CRn as bypass unless overridden.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) for (i = 0; i < smmu->num_mapping_groups; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) arm_smmu_write_sme(smmu, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) /* Make sure all context banks are disabled and clear CB_FSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) for (i = 0; i < smmu->num_context_banks; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) arm_smmu_write_context_bank(smmu, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) /* Invalidate the TLB, just in case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) /* Enable fault reporting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) /* Disable TLB broadcasting. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) /* Enable client access, handling unmatched streams as appropriate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) reg &= ~ARM_SMMU_sCR0_CLIENTPD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (disable_bypass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) reg |= ARM_SMMU_sCR0_USFCFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) reg &= ~ARM_SMMU_sCR0_USFCFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) /* Disable forced broadcasting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) reg &= ~ARM_SMMU_sCR0_FB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) /* Don't upgrade barriers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) reg &= ~(ARM_SMMU_sCR0_BSU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (smmu->features & ARM_SMMU_FEAT_VMID16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) reg |= ARM_SMMU_sCR0_VMID16EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (smmu->features & ARM_SMMU_FEAT_EXIDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) reg |= ARM_SMMU_sCR0_EXIDENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (smmu->impl && smmu->impl->reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) smmu->impl->reset(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) /* Push the button */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) arm_smmu_tlb_sync_global(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
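/*
 * Decode the address size encodings used by the ID registers (IAS/OAS/UBS):
 * 0 -> 32, 1 -> 36, 2 -> 40, 3 -> 42, 4 -> 44, anything larger -> 48 bits.
 */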
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) static int arm_smmu_id_size_to_bits(int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return 36;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) return 40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) return 42;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) return 44;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) return 48;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
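/*
 * Discover the hardware configuration from the architected ID registers:
 * ID0 for translation stages and stream matching, ID1 for page and context
 * bank geometry, ID2 for address sizes and supported page-table formats.
 */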
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) dev_notice(smmu->dev, "probing hardware configuration...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) dev_notice(smmu->dev, "SMMUv%d with:\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) smmu->version == ARM_SMMU_V2 ? 2 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) /* ID0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) /* Restrict available stages based on module parameter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (force_stage == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) else if (force_stage == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (id & ARM_SMMU_ID0_S1TS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) dev_notice(smmu->dev, "\tstage 1 translation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (id & ARM_SMMU_ID0_S2TS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) dev_notice(smmu->dev, "\tstage 2 translation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (id & ARM_SMMU_ID0_NTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) dev_notice(smmu->dev, "\tnested translation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (!(smmu->features &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) dev_err(smmu->dev, "\tno translation support!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if ((id & ARM_SMMU_ID0_S1TS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) dev_notice(smmu->dev, "\taddress translation ops\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * In order for DMA API calls to work properly, we must defer to what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * the FW says about coherency, regardless of what the hardware claims.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) * Fortunately, this also opens up a workaround for systems where the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * ID register value has ended up configured incorrectly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (cttw_fw || cttw_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) dev_notice(smmu->dev, "\t%scoherent table walk\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) cttw_fw ? "" : "non-");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (cttw_fw != cttw_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) dev_notice(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) "\t(IDR0.CTTW overridden by FW configuration)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) /* Max. number of entries we have for stream matching/indexing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) smmu->features |= ARM_SMMU_FEAT_EXIDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) size = 1 << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) smmu->streamid_mask = size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (id & ARM_SMMU_ID0_SMS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) dev_err(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) "stream-matching supported, but no SMRs present!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) /* Zero-initialised to mark as invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (!smmu->smrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) dev_notice(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 			   "\tstream matching with %u register groups\n", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /* s2cr->type == 0 means translation, so initialise explicitly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (!smmu->s2crs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) for (i = 0; i < size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) smmu->s2crs[i] = s2cr_init_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) smmu->num_mapping_groups = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) mutex_init(&smmu->stream_map_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) spin_lock_init(&smmu->global_sync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (smmu->version < ARM_SMMU_V2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) /* ID1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) /* Check for size mismatch of SMMU address space from mapped region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (smmu->numpage != 2 * size << smmu->pgshift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) dev_warn(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 2 * size << smmu->pgshift, smmu->numpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) smmu->numpage = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (smmu->num_s2_context_banks > smmu->num_context_banks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) dev_err(smmu->dev, "impossible number of S2 context banks!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) smmu->num_context_banks, smmu->num_s2_context_banks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) sizeof(*smmu->cbs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (!smmu->cbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /* ID2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) smmu->ipa_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) /* The output mask is also applied for bypass */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) smmu->pa_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (id & ARM_SMMU_ID2_VMID16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) smmu->features |= ARM_SMMU_FEAT_VMID16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * What the page table walker can address actually depends on which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * descriptor format is in use, but since a) we don't know that yet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) * and b) it can vary per context bank, this will have to do...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) dev_warn(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) "failed to set DMA mask for table walker\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (smmu->version < ARM_SMMU_V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) smmu->va_size = smmu->ipa_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (smmu->version == ARM_SMMU_V1_64K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) smmu->va_size = arm_smmu_id_size_to_bits(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (id & ARM_SMMU_ID2_PTFS_4K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (id & ARM_SMMU_ID2_PTFS_16K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (id & ARM_SMMU_ID2_PTFS_64K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (smmu->impl && smmu->impl->cfg_probe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) ret = smmu->impl->cfg_probe(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) /* Now we've corralled the various formats, what'll it do? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) if (smmu->features &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (arm_smmu_ops.pgsize_bitmap == -1UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) smmu->pgsize_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) smmu->va_size, smmu->ipa_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) smmu->ipa_size, smmu->pa_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) struct arm_smmu_match_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) enum arm_smmu_arch_version version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) enum arm_smmu_implementation model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
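/* Static match data: pair an architecture version with an implementation model. */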
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) #define ARM_SMMU_MATCH_DATA(name, ver, imp) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) static const struct of_device_id arm_smmu_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) #ifdef CONFIG_ACPI
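/*
 * Map the IORT node's SMMU model field onto the driver's version and
 * implementation enums; unrecognised models are rejected with -ENODEV.
 */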
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) switch (model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) case ACPI_IORT_SMMU_V1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) case ACPI_IORT_SMMU_CORELINK_MMU400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) smmu->version = ARM_SMMU_V1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) smmu->model = GENERIC_SMMU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) case ACPI_IORT_SMMU_CORELINK_MMU401:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) smmu->version = ARM_SMMU_V1_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) smmu->model = GENERIC_SMMU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) case ACPI_IORT_SMMU_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) smmu->version = ARM_SMMU_V2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) smmu->model = GENERIC_SMMU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) case ACPI_IORT_SMMU_CORELINK_MMU500:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) smmu->version = ARM_SMMU_V2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) smmu->model = ARM_MMU500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) smmu->version = ARM_SMMU_V2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) smmu->model = CAVIUM_SMMUV2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) struct device *dev = smmu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) struct acpi_iort_node *node =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) *(struct acpi_iort_node **)dev_get_platdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) struct acpi_iort_smmu *iort_smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) /* Retrieve SMMU1/2 specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) iort_smmu = (struct acpi_iort_smmu *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) ret = acpi_smmu_get_data(iort_smmu->model, smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) /* Ignore the configuration access interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) smmu->num_global_irqs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
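/*
 * DT probe: fetch #global-interrupts, take the version and model from the
 * match data, and arbitrate between the legacy "mmu-masters" binding and
 * the generic "iommus" binding; the two cannot be mixed in one system.
 */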
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) static int arm_smmu_device_dt_probe(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) const struct arm_smmu_match_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) bool legacy_binding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (of_property_read_u32(dev->of_node, "#global-interrupts",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) &smmu->num_global_irqs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) dev_err(dev, "missing #global-interrupts property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) data = of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) smmu->version = data->version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) smmu->model = data->model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (legacy_binding && !using_generic_binding) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) if (!using_legacy_binding) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) using_legacy_binding = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) } else if (!legacy_binding && !using_legacy_binding) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) using_generic_binding = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) dev_err(dev, "not probing due to mismatched DT properties\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (of_dma_is_coherent(dev->of_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
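/*
 * Install the SMMU ops on every bus type we may serve (or detach them again
 * when called with ops == NULL at remove time), unwinding any partial setup
 * on failure.
 */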
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) static int arm_smmu_bus_init(struct iommu_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) /* Oh, for a proper bus abstraction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (!iommu_present(&platform_bus_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) err = bus_set_iommu(&platform_bus_type, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) #ifdef CONFIG_ARM_AMBA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (!iommu_present(&amba_bustype)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) err = bus_set_iommu(&amba_bustype, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) goto err_reset_platform_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if (!iommu_present(&pci_bus_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) err = bus_set_iommu(&pci_bus_type, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) goto err_reset_amba_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) #ifdef CONFIG_FSL_MC_BUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if (!iommu_present(&fsl_mc_bus_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) err = bus_set_iommu(&fsl_mc_bus_type, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) goto err_reset_pci_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) err_reset_pci_ops: __maybe_unused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) bus_set_iommu(&pci_bus_type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) err_reset_amba_ops: __maybe_unused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) #ifdef CONFIG_ARM_AMBA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) bus_set_iommu(&amba_bustype, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) err_reset_platform_ops: __maybe_unused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) bus_set_iommu(&platform_bus_type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
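/*
 * Probe: parse the firmware description (DT or ACPI), map the register
 * space, collect IRQs and clocks, read the hardware configuration, request
 * the global fault IRQs, register with the IOMMU core, reset the device and
 * finally install the bus ops for the generic bindings.
 */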
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) static int arm_smmu_device_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) resource_size_t ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) struct arm_smmu_device *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) int num_irqs, i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) irqreturn_t (*global_fault)(int irq, void *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) if (!smmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) dev_err(dev, "failed to allocate arm_smmu_device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) smmu->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (dev->of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) err = arm_smmu_device_dt_probe(pdev, smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) err = arm_smmu_device_acpi_probe(pdev, smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) ioaddr = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) smmu->base = devm_ioremap_resource(dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (IS_ERR(smmu->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) return PTR_ERR(smmu->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * The resource size should effectively match the value of SMMU_TOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) * stash that temporarily until we know PAGESIZE to validate it with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) smmu->numpage = resource_size(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) smmu = arm_smmu_impl_init(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (IS_ERR(smmu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) return PTR_ERR(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) num_irqs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) num_irqs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (num_irqs > smmu->num_global_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) smmu->num_context_irqs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) if (!smmu->num_context_irqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) dev_err(dev, "found %d interrupts but expected at least %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) num_irqs, smmu->num_global_irqs + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) if (!smmu->irqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) for (i = 0; i < num_irqs; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) int irq = platform_get_irq(pdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) smmu->irqs[i] = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) err = devm_clk_bulk_get_all(dev, &smmu->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) dev_err(dev, "failed to get clocks %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) smmu->num_clks = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) err = arm_smmu_device_cfg_probe(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (smmu->version == ARM_SMMU_V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (smmu->num_context_banks > smmu->num_context_irqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) "found only %d context irq(s) but %d required\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) smmu->num_context_irqs, smmu->num_context_banks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) /* Ignore superfluous interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) smmu->num_context_irqs = smmu->num_context_banks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) if (smmu->impl && smmu->impl->global_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) global_fault = smmu->impl->global_fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) global_fault = arm_smmu_global_fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) for (i = 0; i < smmu->num_global_irqs; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) err = devm_request_irq(smmu->dev, smmu->irqs[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) global_fault,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) "arm-smmu global fault",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) dev_err(dev, "failed to request global IRQ %d (%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) i, smmu->irqs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) "smmu.%pa", &ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) dev_err(dev, "Failed to register iommu in sysfs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) err = iommu_device_register(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) dev_err(dev, "Failed to register iommu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) platform_set_drvdata(pdev, smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) arm_smmu_device_reset(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) arm_smmu_test_smr_masks(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) * We want to avoid touching dev->power.lock in fastpaths unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) * it's really going to do something useful - pm_runtime_enabled()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) * can serve as an ideal proxy for that decision. So, conditionally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) * enable pm_runtime.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) if (dev->pm_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) pm_runtime_set_active(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) * For ACPI and generic DT bindings, an SMMU will be probed before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) * any device which might need it, so we want the bus ops in place
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) * ready to handle default domain setup as soon as any SMMU exists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) if (!using_legacy_binding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) return arm_smmu_bus_init(&arm_smmu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
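/*
 * Remove mirrors probe in reverse: detach the bus ops, unregister from the
 * IOMMU core, disable translation via sCR0.CLIENTPD, and release the clocks.
 */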
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) static int arm_smmu_device_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (!smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) dev_notice(&pdev->dev, "disabling translation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) arm_smmu_bus_init(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) iommu_device_unregister(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) iommu_device_sysfs_remove(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) arm_smmu_rpm_get(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) /* Turn the thing off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) arm_smmu_rpm_put(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) if (pm_runtime_enabled(smmu->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) pm_runtime_force_suspend(smmu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) clk_bulk_disable(smmu->num_clks, smmu->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) clk_bulk_unprepare(smmu->num_clks, smmu->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) static void arm_smmu_device_shutdown(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) arm_smmu_device_remove(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
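/*
 * Runtime PM: suspend simply gates the clocks; resume re-enables them and
 * re-runs the device reset, since register state may have been lost while
 * the power domain was off.
 */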
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) struct arm_smmu_device *smmu = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) arm_smmu_device_reset(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) struct arm_smmu_device *smmu = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) clk_bulk_disable(smmu->num_clks, smmu->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (pm_runtime_suspended(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) return arm_smmu_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (pm_runtime_suspended(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) return arm_smmu_runtime_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) static const struct dev_pm_ops arm_smmu_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) arm_smmu_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) static struct platform_driver arm_smmu_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) .name = "arm-smmu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) .of_match_table = arm_smmu_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) .pm = &arm_smmu_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) .suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) .probe = arm_smmu_device_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) .remove = arm_smmu_device_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) .shutdown = arm_smmu_device_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) module_platform_driver(arm_smmu_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) MODULE_AUTHOR("Will Deacon <will@kernel.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) MODULE_ALIAS("platform:arm-smmu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) MODULE_LICENSE("GPL v2");