// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014-2020 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

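/*
 * On 32-bit ARM without CONFIG_IOMMU_DMA the driver plugs into the ARM
 * DMA mapping API through the arm_iommu_*() calls; on other configurations
 * those calls compile down to the no-op stubs below so the shared code
 * builds unchanged.
 */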
#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif

#define IPMMU_CTX_MAX		8U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		48U

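/*
 * struct ipmmu_features - describes the differences between SoC
 * integrations: register layout (context and uTLB offset bases and the
 * per-context stride), the number of contexts and uTLBs, and quirks such
 * as cache snooping on page table walks and the width of the IMTTBCR SL0
 * field.
 */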
struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */

/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

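/*
 * Context registers are laid out at ctx_offset_base + context_id *
 * ctx_offset_stride, both taken from the per-SoC ipmmu_features. With a
 * base of 0x0 and a stride of 0x40, for example, IMCTR of context 2 would
 * be read from offset 2 * 0x40 + 0x0000 = 0x80.
 */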
static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	return mmu->features->ctx_offset_base +
	       context_id * mmu->features->ctx_offset_stride + reg;
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg, u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
			   IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_imuctr_write(mmu, utlb, 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

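/*
 * Allocate an unused context index on @mmu and bind @domain to it. The
 * bitmap search and the domains[] update are done together under
 * mmu->lock; returns the context index, or -EBUSY if all contexts are in
 * use.
 */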
static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

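/*
 * Program the context registers from the io-pgtable configuration: the
 * TTBR0 table address, the IMTTBCR walk attributes, the MAIR0 memory
 * attributes and (where required) IMBUSCR, then clear stale faults and
 * enable translation with a TLB flush.
 */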
static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is a TLB flush really needed here?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

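/*
 * Handle a fault reported on one context: latch the faulting I/O VA,
 * clear the IMSTR status, log fatal multi-hit and walk-abort conditions,
 * and give page/translation faults to report_iommu_fault() first.
 */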
static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before
	 * clearing the status, otherwise it will read as 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

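/*
 * The IPMMU exposes a single interrupt line for all contexts, so fan the
 * interrupt out to every active domain and report IRQ_HANDLED if any of
 * them claimed it.
 */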
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			/* Free the containing domain, not the embedded member. */
			kfree(to_vmsa_domain(io_domain));
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size, struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));

	return 0;
}

static const struct soc_device_attribute soc_rcar_gen3[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a774e1", },
	{ .soc_id = "r8a7795", },
	{ .soc_id = "r8a77961", },
	{ .soc_id = "r8a7796", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77970", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a774e1", },
	{ .soc_id = "r8a7795", .revision = "ES3.*" },
	{ .soc_id = "r8a77961", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const char * const rcar_gen3_slave_whitelist[] = {
};

static bool ipmmu_slave_whitelist(struct device *dev)
{
	unsigned int i;

	/*
	 * For R-Car Gen3, use a whitelist to opt in slave devices.
	 * For other SoCs, always return true.
	 */
	if (!soc_device_match(soc_rcar_gen3))
		return true;

	/* Check whether this R-Car Gen3 SoC can use the IPMMU correctly. */
	if (!soc_device_match(soc_rcar_gen3_whitelist))
		return false;

	/* Check whether this slave device can work with the IPMMU. */
	for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
		if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
			return true;
	}

	/* Otherwise, do not allow use of the IPMMU. */
	return false;
}

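/*
 * As an illustration (not taken from this file), a DT consumer such as
 * "iommus = <&ipmmu_ds0 16>;" reaches this callback with spec->args[0] ==
 * 16, the uTLB number that ipmmu_attach_device() later programs through
 * IMUCTR(16).
 */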
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) static int ipmmu_of_xlate(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct of_phandle_args *spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (!ipmmu_slave_whitelist(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) iommu_fwspec_add_ids(dev, spec->args, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /* Initialize once - xlate() will call multiple times */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (to_ipmmu(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return 0;

	return ipmmu_init_platform_device(dev, spec);
}

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	int ret;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	/* Only let through devices that have been verified in xlate(). */
	if (!mmu)
		return ERR_PTR(-ENODEV);

	return &mmu->iommu;
}

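/*
 * Called by the IOMMU core once per device, after probe_device() and group
 * setup. On 32-bit ARM without the generic IOMMU DMA layer this is where the
 * device gets attached to the ARM-specific VA mapping.
 */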
static void ipmmu_probe_finalize(struct device *dev)
{
	int ret = 0;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		ret = ipmmu_init_arm_mapping(dev);

	if (ret)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

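/* Detach the device from the ARM VA mapping set up in probe_finalize(). */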
static void ipmmu_release_device(struct device *dev)
{
	arm_iommu_detach_device(dev);
}

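/*
 * All slave devices behind one IPMMU instance share a single IOMMU group,
 * allocated lazily on first use and reference-counted afterwards.
 */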
static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

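/*
 * On 32-bit ARM without the IOMMU DMA layer, fall back to generic per-device
 * groups; otherwise share one group per IPMMU instance via ipmmu_find_group().
 */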
static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_flush_iotlb_all,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.probe_device = ipmmu_probe_device,
	.release_device = ipmmu_release_device,
	.probe_finalize = ipmmu_probe_finalize,
	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
			? generic_device_group : ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

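/*
 * R-Car Gen3 parts pair a root IPMMU (IPMMU-MM) with cache instances, provide
 * eight contexts and 48 micro-TLBs, and set context 0 aside as reserved.
 */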
static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

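/*
 * The generic "renesas,ipmmu-vmsa" entry covers the original VMSA-compatible
 * IPMMU (R-Car Gen2 class SoCs); all Gen3 SoCs share one feature set.
 */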
static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774e1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77961",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
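	/* No micro-TLB is assigned to a context yet. */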
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of the has_cache_leaf_nodes flag or of the
	 * renesas,ipmmu-main property.
	 */
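	/*
	 * For illustration (hypothetical node name), a cache IPMMU points at
	 * its root in the device tree with:
	 *
	 *	renesas,ipmmu-main = <&ipmmu_mm 0>;
	 */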
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/* Defer probing if the root device hasn't been registered yet. */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
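/*
 * No suspend handler is needed: everything required to reprogram the hardware
 * on resume is already tracked in software (domains[] and utlb_ctx[]).
 */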
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

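/*
 * Register at subsys_initcall time so the IOMMU is available before client
 * drivers start probing; setup_done guards against double registration.
 */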
static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}
subsys_initcall(ipmmu_init);