Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

All lines below are attributed by git blame to commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300).

// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io-pgtable.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

#include "arm-smmu-v3.h"

static bool disable_bypass = 1;
module_param(disable_bypass, bool, 0444);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

static bool disable_msipolling;
module_param(disable_msipolling, bool, 0444);
MODULE_PARM_DESC(disable_msipolling,
	"Disable MSI-based polling for CMD_SYNC completion.");

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

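/*
 * Register offsets for each completion MSI: CFG0 holds the 64-bit target
 * address, CFG1 the payload data and CFG2 the memory attributes used when
 * the SMMU writes the MSI.
 */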
static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa);
DEFINE_MUTEX(arm_smmu_asid_lock);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
	{ 0, NULL},
};

static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
						 struct arm_smmu_device *smmu)
{
	if (offset > SZ_64K)
		return smmu->page1 + offset - SZ_64K;

	return smmu->base + offset;
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
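/*
 * Queue prod/cons values pack three fields: the entry index (Q_IDX), a wrap
 * bit just above it (Q_WRP) and an overflow flag (Q_OVF). Comparing the wrap
 * bits distinguishes a full queue from an empty one when the indices match.
 */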
static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
{
	u32 space, prod, cons;

	prod = Q_IDX(q, q->prod);
	cons = Q_IDX(q, q->cons);

	if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons))
		space = (1 << q->max_n_shift) - (prod - cons);
	else
		space = cons - prod;

	return space >= n;
}

static bool queue_full(struct arm_smmu_ll_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_ll_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

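/* Has the entry written at 'prod' been consumed, i.e. has cons moved past it? */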
static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod)
{
	return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) &&
		(Q_IDX(q, q->cons) > Q_IDX(q, prod))) ||
	       ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) &&
		(Q_IDX(q, q->cons) <= Q_IDX(q, prod)));
}

static void queue_sync_cons_out(struct arm_smmu_queue *q)
{
	/*
	 * Ensure that all CPU accesses (reads and writes) to the queue
	 * are complete before we update the cons pointer.
	 */
	__iomb();
	writel_relaxed(q->llq.cons, q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_ll_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
	q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
}

static int queue_sync_prod_in(struct arm_smmu_queue *q)
{
	u32 prod;
	int ret = 0;

	/*
	 * We can't use the _relaxed() variant here, as we must prevent
	 * speculative reads of the queue before we have determined that
	 * prod has indeed moved.
	 */
	prod = readl(q->prod_reg);

	if (Q_OVF(prod) != Q_OVF(q->llq.prod))
		ret = -EOVERFLOW;

	q->llq.prod = prod;
	return ret;
}

static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n;
	return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
}

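/*
 * Poll-with-backoff helpers: use WFE when the SMMU can issue SEV on queue
 * updates, otherwise spin briefly and then back off with exponentially
 * increasing udelay()s until ARM_SMMU_POLL_TIMEOUT_US expires.
 */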
static void queue_poll_init(struct arm_smmu_device *smmu,
			    struct arm_smmu_queue_poll *qp)
{
	qp->delay = 1;
	qp->spin_cnt = 0;
	qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);
}

static int queue_poll(struct arm_smmu_queue_poll *qp)
{
	if (ktime_compare(ktime_get(), qp->timeout) > 0)
		return -ETIMEDOUT;

	if (qp->wfe) {
		wfe();
	} else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) {
		cpu_relax();
	} else {
		udelay(qp->delay);
		qp->delay *= 2;
		qp->spin_cnt = 0;
	}

	return 0;
}

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static void queue_read(u64 *dst, __le64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(&q->llq))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords);
	queue_inc_cons(&q->llq);
	queue_sync_cons_out(q);
	return 0;
}

/* High-level queue accessors */
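/*
 * Encode an arm_smmu_cmdq_ent into the raw two-dword command layout.
 * Returns -EINVAL for an unrecognised PRI response and -ENOENT for an
 * unknown opcode.
 */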
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT);
	cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid);
		cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_CD:
		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
		fallthrough;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
		cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
		break;
	case CMDQ_OP_CFGI_CD_ALL:
		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
		fallthrough;
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
		break;
	case CMDQ_OP_ATC_INV:
		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid);
		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid);
		cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size);
		cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
		cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
		cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid);
		cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid);
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
		case PRI_RESP_FAIL:
		case PRI_RESP_SUCC:
			break;
		default:
			return -EINVAL;
		}
		cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
		break;
	case CMDQ_OP_CMD_SYNC:
		if (ent->sync.msiaddr) {
			cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
			cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
		} else {
			cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
		}
		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

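/*
 * When MSI polling is in use, the CMD_SYNC's completion MSI is pointed at the
 * queue slot holding the CMD_SYNC itself, so the SMMU's write-back clears the
 * first word of the command and the poller simply watches for that.
 */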
static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
					 u32 prod)
{
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	struct arm_smmu_cmdq_ent ent = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	/*
	 * Beware that Hi16xx adds an extra 32 bits of goodness to its MSI
	 * payload, so the write will zero the entire command on that platform.
	 */
	if (smmu->options & ARM_SMMU_OPT_MSIPOLL) {
		ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
				   q->ent_dwords * 8;
	}

	arm_smmu_cmdq_build_cmd(cmd, &ent);
}

static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
		[CMDQ_ERR_CERROR_ATC_INV_IDX]	= "ATC invalidate timeout",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = FIELD_GET(CMDQ_CONS_ERR, cons);
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ?  cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ATC_INV_IDX:
		/*
		 * ATC Invalidation Completion timeout. CONS is still pointing
		 * at the CMD_SYNC. Attempt to complete other pending commands
		 * by repeating the CMD_SYNC, though we might well end up back
		 * here since the ATC invalidation may still be pending.
		 */
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}

/*
 * Command queue locking.
 * This is a form of bastardised rwlock with the following major changes:
 *
 * - The only LOCK routines are exclusive_trylock() and shared_lock().
 *   Neither have barrier semantics, and instead provide only a control
 *   dependency.
 *
 * - The UNLOCK routines are supplemented with shared_tryunlock(), which
 *   fails if the caller appears to be the last lock holder (yes, this is
 *   racy). All successful UNLOCK routines have RELEASE semantics.
 */
static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
{
	int val;

	/*
	 * We can try to avoid the cmpxchg() loop by simply incrementing the
	 * lock counter. When held in exclusive state, the lock counter is set
	 * to INT_MIN so these increments won't hurt as the value will remain
	 * negative.
	 */
	if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
		return;

	do {
		val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
	} while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
}

static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
{
	(void)atomic_dec_return_release(&cmdq->lock);
}

static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq)
{
	if (atomic_read(&cmdq->lock) == 1)
		return false;

	arm_smmu_cmdq_shared_unlock(cmdq);
	return true;
}

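/*
 * Exclusive locking is a trylock that cmpxchg()es the counter from 0 (no
 * holders) to INT_MIN with local interrupts disabled; shared holders keep
 * the counter positive, so the trylock fails while any are still inside.
 */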
#define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)		\
({									\
	bool __ret;							\
	local_irq_save(flags);						\
	__ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN);	\
	if (!__ret)							\
		local_irq_restore(flags);				\
	__ret;								\
})

#define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags)		\
({									\
	atomic_set_release(&cmdq->lock, 0);				\
	local_irq_restore(flags);					\
})


/*
 * Command queue insertion.
 * This is made fiddly by our attempts to achieve some sort of scalability
 * since there is one queue shared amongst all of the CPUs in the system.  If
 * you like mixed-size concurrency, dependency ordering and relaxed atomics,
 * then you'll *love* this monstrosity.
 *
 * The basic idea is to split the queue up into ranges of commands that are
 * owned by a given CPU; the owner may not have written all of the commands
 * itself, but is responsible for advancing the hardware prod pointer when
 * the time comes. The algorithm is roughly:
 *
 * 	1. Allocate some space in the queue. At this point we also discover
 *	   whether the head of the queue is currently owned by another CPU,
 *	   or whether we are the owner.
 *
 *	2. Write our commands into our allocated slots in the queue.
 *
 *	3. Mark our slots as valid in arm_smmu_cmdq.valid_map.
 *
 *	4. If we are an owner:
 *		a. Wait for the previous owner to finish.
 *		b. Mark the queue head as unowned, which tells us the range
 *		   that we are responsible for publishing.
 *		c. Wait for all commands in our owned range to become valid.
 *		d. Advance the hardware prod pointer.
 *		e. Tell the next owner we've finished.
 *
 *	5. If we are inserting a CMD_SYNC (we may or may not have been an
 *	   owner), then we need to stick around until it has completed:
 *		a. If we have MSIs, the SMMU can write back into the CMD_SYNC
 *		   to clear the first 4 bytes.
 *		b. Otherwise, we spin waiting for the hardware cons pointer to
 *		   advance past our command.
 *
 * The devil is in the details, particularly the use of locking for handling
 * SYNC completion and freeing up space in the queue before we think that it is
 * full.
 */
static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq,
					       u32 sprod, u32 eprod, bool set)
{
	u32 swidx, sbidx, ewidx, ebidx;
	struct arm_smmu_ll_queue llq = {
		.max_n_shift	= cmdq->q.llq.max_n_shift,
		.prod		= sprod,
	};

	ewidx = BIT_WORD(Q_IDX(&llq, eprod));
	ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG;

	while (llq.prod != eprod) {
		unsigned long mask;
		atomic_long_t *ptr;
		u32 limit = BITS_PER_LONG;

		swidx = BIT_WORD(Q_IDX(&llq, llq.prod));
		sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG;

		ptr = &cmdq->valid_map[swidx];

		if ((swidx == ewidx) && (sbidx < ebidx))
			limit = ebidx;

		mask = GENMASK(limit - 1, sbidx);

		/*
		 * The valid bit is the inverse of the wrap bit. This means
		 * that a zero-initialised queue is invalid and, after marking
		 * all entries as valid, they become invalid again when we
		 * wrap.
		 */
		if (set) {
			atomic_long_xor(mask, ptr);
		} else { /* Poll */
			unsigned long valid;

			valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask;
			atomic_long_cond_read_relaxed(ptr, (VAL & mask) == valid);
		}

		llq.prod = queue_inc_prod_n(&llq, limit - sbidx);
	}
}

/* Mark all entries in the range [sprod, eprod) as valid */
static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq,
					u32 sprod, u32 eprod)
{
	__arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true);
}

/* Wait for all entries in the range [sprod, eprod) to become valid */
static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
					 u32 sprod, u32 eprod)
{
	__arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
}

/* Wait for the command queue to become non-full */
static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
					     struct arm_smmu_ll_queue *llq)
{
	unsigned long flags;
	struct arm_smmu_queue_poll qp;
	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
	int ret = 0;

	/*
	 * Try to update our copy of cons by grabbing exclusive cmdq access. If
	 * that fails, spin until somebody else updates it for us.
	 */
	if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
		WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
		arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
		llq->val = READ_ONCE(cmdq->q.llq.val);
		return 0;
	}

	queue_poll_init(smmu, &qp);
	do {
		llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
		if (!queue_full(llq))
			break;

		ret = queue_poll(&qp);
	} while (!ret);

	return ret;
}

/*
 * Wait until the SMMU signals a CMD_SYNC completion MSI.
 * Must be called with the cmdq lock held in some capacity.
 */
static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
					  struct arm_smmu_ll_queue *llq)
{
	int ret = 0;
	struct arm_smmu_queue_poll qp;
	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
	u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));

	queue_poll_init(smmu, &qp);

	/*
	 * The MSI won't generate an event, since it's being written back
	 * into the command queue.
	 */
	qp.wfe = false;
	smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(&qp)));
	llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1);
	return ret;
}

/*
 * Wait until the SMMU cons index passes llq->prod.
 * Must be called with the cmdq lock held in some capacity.
 */
static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
					       struct arm_smmu_ll_queue *llq)
{
	struct arm_smmu_queue_poll qp;
	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
	u32 prod = llq->prod;
	int ret = 0;

	queue_poll_init(smmu, &qp);
	llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
	do {
		if (queue_consumed(llq, prod))
			break;

		ret = queue_poll(&qp);

		/*
		 * This needs to be a readl() so that our subsequent call
		 * to arm_smmu_cmdq_shared_tryunlock() can fail accurately.
		 *
		 * Specifically, we need to ensure that we observe all
		 * shared_lock()s by other CMD_SYNCs that share our owner,
		 * so that a failing call to tryunlock() means that we're
		 * the last one out and therefore we can safely advance
		 * cmdq->q.llq.cons. Roughly speaking:
		 *
		 * CPU 0		CPU1			CPU2 (us)
		 *
		 * if (sync)
		 * 	shared_lock();
		 *
		 * dma_wmb();
		 * set_valid_map();
		 *
		 * 			if (owner) {
		 *				poll_valid_map();
		 *				<control dependency>
		 *				writel(prod_reg);
		 *
		 *						readl(cons_reg);
		 *						tryunlock();
		 *
		 * Requires us to see CPU 0's shared_lock() acquisition.
		 */
		llq->cons = readl(cmdq->q.cons_reg);
	} while (!ret);

	return ret;
}

static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
					 struct arm_smmu_ll_queue *llq)
{
	if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
		return __arm_smmu_cmdq_poll_until_msi(smmu, llq);

	return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
}

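/* Copy n commands into the queue starting at 'prod', wrapping as required. */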
static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
					u32 prod, int n)
{
	int i;
	struct arm_smmu_ll_queue llq = {
		.max_n_shift	= cmdq->q.llq.max_n_shift,
		.prod		= prod,
	};

	for (i = 0; i < n; ++i) {
		u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS];

		prod = queue_inc_prod_n(&llq, i);
		queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
	}
}

/*
 * This is the actual insertion function, and provides the following
 * ordering guarantees to callers:
 *
 * - There is a dma_wmb() before publishing any commands to the queue.
 *   This can be relied upon to order prior writes to data structures
 *   in memory (such as a CD or an STE) before the command.
 *
 * - On completion of a CMD_SYNC, there is a control dependency.
 *   This can be relied upon to order subsequent writes to memory (e.g.
 *   freeing an IOVA) after completion of the CMD_SYNC.
 *
 * - Command insertion is totally ordered, so if two CPUs each race to
 *   insert their own list of commands then all of the commands from one
 *   CPU will appear before any of the commands from the other CPU.
 */
static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
				       u64 *cmds, int n, bool sync)
{
	u64 cmd_sync[CMDQ_ENT_DWORDS];
	u32 prod;
	unsigned long flags;
	bool owner;
	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
	struct arm_smmu_ll_queue llq = {
		.max_n_shift = cmdq->q.llq.max_n_shift,
	}, head = llq;
	int ret = 0;

	/* 1. Allocate some space in the queue */
	local_irq_save(flags);
	llq.val = READ_ONCE(cmdq->q.llq.val);
	do {
		u64 old;

		while (!queue_has_space(&llq, n + sync)) {
			local_irq_restore(flags);
			if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
				dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
			local_irq_save(flags);
		}

		head.cons = llq.cons;
		head.prod = queue_inc_prod_n(&llq, n + sync) |
					     CMDQ_PROD_OWNED_FLAG;

		old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
		if (old == llq.val)
			break;

		llq.val = old;
	} while (1);
	owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG);
	head.prod &= ~CMDQ_PROD_OWNED_FLAG;
	llq.prod &= ~CMDQ_PROD_OWNED_FLAG;

	/*
	 * 2. Write our commands into the queue
	 * Dependency ordering from the cmpxchg() loop above.
	 */
	arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
	if (sync) {
		prod = queue_inc_prod_n(&llq, n);
		arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod);
		queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);

		/*
		 * In order to determine completion of our CMD_SYNC, we must
		 * ensure that the queue can't wrap twice without us noticing.
		 * We achieve that by taking the cmdq lock as shared before
		 * marking our slot as valid.
		 */
		arm_smmu_cmdq_shared_lock(cmdq);
	}

	/* 3. Mark our slots as valid, ensuring commands are visible first */
	dma_wmb();
	arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);

	/* 4. If we are the owner, take control of the SMMU hardware */
	if (owner) {
		/* a. Wait for previous owner to finish */
		atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);

		/* b. Stop gathering work by clearing the owned flag */
		prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG,
						   &cmdq->q.llq.atomic.prod);
		prod &= ~CMDQ_PROD_OWNED_FLAG;

		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		 * c. Wait for any gathered work to be written to the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		 * Note that we read our own entries so that we have the control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		 * dependency required by (d).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		 * d. Advance the hardware prod pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		 * Control dependency ordering from the entries becoming valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		writel_relaxed(prod, cmdq->q.prod_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		 * e. Tell the next owner we're done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		 * Make sure we've updated the hardware first, so that we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		 * race to update prod and potentially move it backwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		atomic_set_release(&cmdq->owner_prod, prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	/* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	if (sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		llq.prod = queue_inc_prod_n(&llq, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			dev_err_ratelimited(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 					    "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 					    llq.prod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 					    readl_relaxed(cmdq->q.prod_reg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 					    readl_relaxed(cmdq->q.cons_reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		 * Try to unlock the cmdq lock. This will fail if we're the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		 * reader, in which case we can safely update cmdq->q.llq.cons.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			arm_smmu_cmdq_shared_unlock(cmdq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
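/*
 * Build a single command from @ent and insert it into the command queue,
 * without waiting for completion. Unknown opcodes are rejected with -EINVAL.
 */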
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 				   struct arm_smmu_cmdq_ent *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	u64 cmd[CMDQ_ENT_DWORDS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			 ent->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
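/*
 * Insert a bare CMD_SYNC and wait for it to complete. Completion of the
 * CMD_SYNC also guarantees completion of the commands issued before it in
 * the same queue.
 */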
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
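/*
 * Add @cmd to a local batch, flushing the batch to the command queue
 * (without a CMD_SYNC) once CMDQ_BATCH_ENTRIES commands have accumulated.
 * Note that the return value of arm_smmu_cmdq_build_cmd() is not checked
 * here, so callers are expected to pass only opcodes the driver can encode.
 */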
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 				    struct arm_smmu_cmdq_batch *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 				    struct arm_smmu_cmdq_ent *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	if (cmds->num == CMDQ_BATCH_ENTRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		cmds->num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	arm_smmu_cmdq_build_cmd(&cmds->cmds[cmds->num * CMDQ_ENT_DWORDS], cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	cmds->num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
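/*
 * Issue whatever remains in the batch together with a CMD_SYNC and wait
 * for completion.
 */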
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 				      struct arm_smmu_cmdq_batch *cmds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) /* Context descriptor manipulation functions */
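/*
 * Invalidate all stage-1 TLB entries tagged with @asid and wait for the
 * invalidation to complete.
 */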
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct arm_smmu_cmdq_ent cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		.opcode = CMDQ_OP_TLBI_NH_ASID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		.tlbi.asid = asid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	arm_smmu_cmdq_issue_sync(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
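/*
 * Invalidate any cached copies of the context descriptor for @ssid on every
 * stream attached to this domain, then wait for the CMD_SYNC issued by the
 * batch submission.
 */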
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			     int ssid, bool leaf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	struct arm_smmu_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	struct arm_smmu_cmdq_batch cmds = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	struct arm_smmu_cmdq_ent cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		.opcode	= CMDQ_OP_CFGI_CD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		.cfgi	= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			.ssid	= ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			.leaf	= leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		for (i = 0; i < master->num_sids; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			cmd.cfgi.sid = master->sids[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	arm_smmu_cmdq_batch_submit(smmu, &cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 					struct arm_smmu_l1_ctx_desc *l1_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 					     &l1_desc->l2ptr_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	if (!l1_desc->l2ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		dev_warn(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			 "failed to allocate context descriptor table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) static void arm_smmu_write_cd_l1_desc(__le64 *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 				      struct arm_smmu_l1_ctx_desc *l1_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		  CTXDESC_L1_DESC_V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	/* See comment in arm_smmu_write_ctx_desc() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	WRITE_ONCE(*dst, cpu_to_le64(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
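/*
 * Return a pointer to the context descriptor for @ssid. The linear format
 * indexes the table directly; with the two-level format, the L2 leaf table
 * is allocated and published on first use. Returns NULL if that allocation
 * fails.
 */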
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 				   u32 ssid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	__le64 *l1ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct arm_smmu_l1_ctx_desc *l1_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	idx = ssid >> CTXDESC_SPLIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	l1_desc = &cdcfg->l1_desc[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	if (!l1_desc->l2ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		/* An invalid L1CD can be cached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		arm_smmu_sync_cd(smmu_domain, ssid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	idx = ssid & (CTXDESC_L2_ENTRIES - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			    struct arm_smmu_ctx_desc *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	 * This function handles the following cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	 * (1) Install primary CD, for normal DMA traffic (SSID = 0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	 * (2) Install a secondary CD, for SID+SSID traffic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	 * (3) Update ASID of a CD. Atomically write the first 64 bits of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	 *     CD, then invalidate the old entry and mappings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	 * (4) Remove a secondary CD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	bool cd_live;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	__le64 *cdptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (!cdptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	val = le64_to_cpu(cdptr[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	cd_live = !!(val & CTXDESC_CD_0_V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	if (!cd) { /* (4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	} else if (cd_live) { /* (3) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		val &= ~CTXDESC_CD_0_ASID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		 * Until CD+TLB invalidation, both ASIDs may be used for tagging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		 * this substream's traffic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	} else { /* (1) and (2) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		cdptr[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		cdptr[3] = cpu_to_le64(cd->mair);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		 * STE is live, and the SMMU might read dwords of this CD in any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		 * order. Ensure that it observes valid values before reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		 * V=1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		arm_smmu_sync_cd(smmu_domain, ssid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		val = cd->tcr |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			CTXDESC_CD_0_ENDI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			CTXDESC_CD_0_R | CTXDESC_CD_0_A |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			(cd->mm ? 0 : CTXDESC_CD_0_ASET) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			CTXDESC_CD_0_AA64 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			CTXDESC_CD_0_V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		/* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			val |= CTXDESC_CD_0_S;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	 * "Configuration structures and configuration invalidation completion"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	 *   The size of single-copy atomic reads made by the SMMU is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	 *   IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	 *   field within an aligned 64-bit span of a structure can be altered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	 *   without first making the structure invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	WRITE_ONCE(cdptr[0], cpu_to_le64(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	arm_smmu_sync_cd(smmu_domain, ssid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
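/*
 * For instance, case (4) in the comment above corresponds to a call such as
 * arm_smmu_write_ctx_desc(smmu_domain, ssid, NULL), which clears the first
 * dword of the CD and invalidates any cached copy of it.
 */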
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
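/*
 * Allocate the context descriptor table for a domain: a single linear table
 * when the SMMU lacks 2-level CD support or when all (1 << s1cdmax)
 * descriptors fit into one leaf, otherwise an L1 table whose leaf tables
 * are allocated on demand by arm_smmu_get_cd_ptr().
 */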
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	size_t l1size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	size_t max_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	max_contexts = 1 << cfg->s1cdmax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	    max_contexts <= CTXDESC_L2_ENTRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		cdcfg->num_l1_ents = max_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 						  CTXDESC_L2_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 					      sizeof(*cdcfg->l1_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		if (!cdcfg->l1_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 					   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	if (!cdcfg->cdtab) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		goto err_free_l1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) err_free_l1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (cdcfg->l1_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		devm_kfree(smmu->dev, cdcfg->l1_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		cdcfg->l1_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	size_t size, l1size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	if (cdcfg->l1_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		for (i = 0; i < cdcfg->num_l1_ents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			if (!cdcfg->l1_desc[i].l2ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			dmam_free_coherent(smmu->dev, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 					   cdcfg->l1_desc[i].l2ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 					   cdcfg->l1_desc[i].l2ptr_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		devm_kfree(smmu->dev, cdcfg->l1_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		cdcfg->l1_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	cdcfg->cdtab_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	cdcfg->cdtab = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
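/*
 * Drop a reference on @cd's ASID and, once the last reference is gone,
 * erase it from the global arm_smmu_asid_xa. Returns true if the ASID was
 * freed; an unallocated (zero) ASID is ignored and false is returned.
 */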
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	bool free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	struct arm_smmu_ctx_desc *old_cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	if (!cd->asid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	free = refcount_dec_and_test(&cd->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	if (free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		old_cd = xa_erase(&arm_smmu_asid_xa, cd->asid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		WARN_ON(old_cd != cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	return free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /* Stream table manipulation functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	u64 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	/* See comment in arm_smmu_write_ctx_desc() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	WRITE_ONCE(*dst, cpu_to_le64(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	struct arm_smmu_cmdq_ent cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		.opcode	= CMDQ_OP_CFGI_STE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		.cfgi	= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			.sid	= sid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			.leaf	= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	arm_smmu_cmdq_issue_sync(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 				      __le64 *dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	 * This is hideously complicated, but we only really care about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	 * three cases at the moment:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	 * 1. Invalid (all zero) -> bypass/fault (init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	 * 2. Bypass/fault -> translation/bypass (attach)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	 * 3. Translation/bypass -> bypass/fault (detach)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	 * Given that we can't update the STE atomically and the SMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	 * doesn't read the thing in a defined order, that leaves us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	 * with the following maintenance requirements:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	 * 1. Update Config, return (init time STEs aren't live)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	 * 3. Update Config, sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	u64 val = le64_to_cpu(dst[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	bool ste_live = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	struct arm_smmu_device *smmu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	struct arm_smmu_s1_cfg *s1_cfg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	struct arm_smmu_s2_cfg *s2_cfg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	struct arm_smmu_domain *smmu_domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	struct arm_smmu_cmdq_ent prefetch_cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		.opcode		= CMDQ_OP_PREFETCH_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		.prefetch	= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			.sid	= sid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	if (master) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		smmu_domain = master->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		smmu = master->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	if (smmu_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		switch (smmu_domain->stage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		case ARM_SMMU_DOMAIN_S1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			s1_cfg = &smmu_domain->s1_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		case ARM_SMMU_DOMAIN_S2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		case ARM_SMMU_DOMAIN_NESTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 			s2_cfg = &smmu_domain->s2_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	if (val & STRTAB_STE_0_V) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		case STRTAB_STE_0_CFG_BYPASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		case STRTAB_STE_0_CFG_S1_TRANS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		case STRTAB_STE_0_CFG_S2_TRANS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 			ste_live = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		case STRTAB_STE_0_CFG_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			BUG_ON(!disable_bypass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 			BUG(); /* STE corruption */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	/* Nuke the existing STE_0 value, as we're going to rewrite it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	val = STRTAB_STE_0_V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	/* Bypass/fault */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if (!smmu_domain || !(s1_cfg || s2_cfg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		if (!smmu_domain && disable_bypass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		dst[0] = cpu_to_le64(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 						STRTAB_STE_1_SHCFG_INCOMING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		dst[2] = 0; /* Nuke the VMID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		 * The SMMU can perform negative caching, so we must sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		 * the STE regardless of whether the old value was live.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		if (smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 			arm_smmu_sync_ste_for_sid(smmu, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	if (s1_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		BUG_ON(ste_live);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		dst[1] = cpu_to_le64(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			 FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 			 FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			 FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			 FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 			 FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		if (smmu->features & ARM_SMMU_FEAT_STALLS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		   !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 			FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 			FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 			FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	if (s2_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		BUG_ON(ste_live);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		dst[2] = cpu_to_le64(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			 FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 			 FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 			 STRTAB_STE_2_S2ENDI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			 STRTAB_STE_2_S2R);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	if (master->ats_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 						 STRTAB_STE_1_EATS_TRANS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	arm_smmu_sync_ste_for_sid(smmu, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	/* See comment in arm_smmu_write_ctx_desc() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	WRITE_ONCE(dst[0], cpu_to_le64(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	arm_smmu_sync_ste_for_sid(smmu, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	/* It's likely that we'll want to use the new STE soon */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
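/*
 * Initialise @nent consecutive STEs with no attached master or domain,
 * i.e. as bypass entries, or as aborting entries when disable_bypass is set.
 */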
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	for (i = 0; i < nent; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		arm_smmu_write_strtab_ent(NULL, -1, strtab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		strtab += STRTAB_STE_DWORDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
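/*
 * Lazily allocate the level-2 stream table covering @sid, initialise its
 * STEs to bypass/abort and publish it through the level-1 descriptor.
 */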
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	void *strtab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	if (desc->l2ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	desc->span = STRTAB_SPLIT + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 					  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	if (!desc->l2ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		dev_err(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			"failed to allocate l2 stream table for SID %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 			sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	arm_smmu_write_strtab_l1_desc(strtab, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /* IRQ and event handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	struct arm_smmu_device *smmu = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	struct arm_smmu_queue *q = &smmu->evtq.q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	struct arm_smmu_ll_queue *llq = &q->llq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	u64 evt[EVTQ_ENT_DWORDS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		while (!queue_remove_raw(q, evt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 			u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			dev_info(smmu->dev, "event 0x%02x received:\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			for (i = 0; i < ARRAY_SIZE(evt); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 				dev_info(smmu->dev, "\t0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 					 (unsigned long long)evt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		 * Not much we can do on overflow, so scream and pretend we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		 * trying harder.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		if (queue_sync_prod_in(q) == -EOVERFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	} while (!queue_empty(llq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	/* Sync our overflow flag, as we believe we're up to speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		    Q_IDX(llq, llq->cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	u32 sid, ssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	u16 grpid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	bool ssv, last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	sid = FIELD_GET(PRIQ_0_SID, evt[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	dev_info(smmu->dev, "unexpected PRI request received:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	dev_info(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		 sid, ssid, grpid, last ? "L" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		 evt[1] & PRIQ_1_ADDR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
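	/*
	 * A response is sent per Page Request Group, once its last request
	 * has been seen. These requests are unexpected, so deny the group.
	 */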
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	if (last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		struct arm_smmu_cmdq_ent cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 			.opcode			= CMDQ_OP_PRI_RESP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 			.substream_valid	= ssv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			.pri			= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 				.sid	= sid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 				.ssid	= ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 				.grpid	= grpid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 				.resp	= PRI_RESP_DENY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	struct arm_smmu_device *smmu = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	struct arm_smmu_queue *q = &smmu->priq.q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	struct arm_smmu_ll_queue *llq = &q->llq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	u64 evt[PRIQ_ENT_DWORDS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		while (!queue_remove_raw(q, evt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 			arm_smmu_handle_ppr(smmu, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		if (queue_sync_prod_in(q) == -EOVERFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	} while (!queue_empty(llq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	/* Sync our overflow flag, as we believe we're up to speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		    Q_IDX(llq, llq->cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	queue_sync_cons_out(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	u32 gerror, gerrorn, active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	struct arm_smmu_device *smmu = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	active = gerror ^ gerrorn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	if (!(active & GERROR_ERR_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		return IRQ_NONE; /* No errors pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	dev_warn(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		 "unexpected global error reported (0x%08x), this could be serious\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		 active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	if (active & GERROR_SFM_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		arm_smmu_device_disable(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	if (active & GERROR_MSI_GERROR_ABT_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		dev_warn(smmu->dev, "GERROR MSI write aborted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	if (active & GERROR_MSI_PRIQ_ABT_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	if (active & GERROR_MSI_EVTQ_ABT_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	if (active & GERROR_MSI_CMDQ_ABT_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	if (active & GERROR_PRIQ_ABT_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	if (active & GERROR_EVTQ_ABT_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	if (active & GERROR_CMDQ_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		arm_smmu_cmdq_skip_err(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	struct arm_smmu_device *smmu = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	arm_smmu_evtq_thread(irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	if (smmu->features & ARM_SMMU_FEAT_PRI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		arm_smmu_priq_thread(irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	arm_smmu_gerror_handler(irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
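/*
 * Build an ATC invalidation command covering [iova, iova + size). The range
 * is rounded up to the aligned power-of-two span required by ATS; a size of
 * zero invalidates the entire ATC for the given SSID. In the second example
 * below, the resulting command ends up with addr == 0 and size == 4
 * (i.e. 16 pages).
 */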
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			struct arm_smmu_cmdq_ent *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	size_t log2_span;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	size_t span_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	/* ATC invalidates are always on 4096-byte pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	size_t inval_grain_shift = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	unsigned long page_start, page_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	*cmd = (struct arm_smmu_cmdq_ent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		.opcode			= CMDQ_OP_ATC_INV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		.substream_valid	= !!ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		.atc.ssid		= ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	if (!size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		cmd->atc.size = ATC_INV_SIZE_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	page_start	= iova >> inval_grain_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	page_end	= (iova + size - 1) >> inval_grain_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	 * In an ATS Invalidate Request, the address must be aligned on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	 * range size, which must be a power-of-two number of pages. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	 * thus have to choose between grossly over-invalidating the region, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	 * splitting the invalidation into multiple commands. For simplicity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	 * we'll go with the first solution, but should refine it in the future
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	 * if multiple commands are shown to be more efficient.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	 * Find the smallest power of two that covers the range. The most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	 * significant differing bit between the start and end addresses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	 * fls(start ^ end), indicates the required span. For example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	 * We want to invalidate pages [8; 11]. This is already the ideal range:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	 *		x = 0b1000 ^ 0b1011 = 0b11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	 *		span = 1 << fls(x) = 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	 * To invalidate pages [7; 10], we need to invalidate [0; 15]:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	 *		x = 0b0111 ^ 0b1010 = 0b1101
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	 *		span = 1 << fls(x) = 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	log2_span	= fls_long(page_start ^ page_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	span_mask	= (1ULL << log2_span) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	page_start	&= ~span_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	cmd->atc.addr	= page_start << inval_grain_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	cmd->atc.size	= log2_span;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
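
/*
 * Worked example for the span computation above (values assumed for
 * illustration): invalidating size 0x4000 at IOVA 0x7000 covers pages
 * [7; 10], i.e. the second case in the comment:
 *
 *	arm_smmu_atc_inv_to_cmd(0, 0x7000, 0x4000, &cmd);
 *	// page_start = 0b0111, page_end = 0b1010
 *	// log2_span  = fls(0b0111 ^ 0b1010) = fls(0b1101) = 4
 *	// cmd.atc.addr = 0x0, cmd.atc.size = 4 -> 2^4 pages (64KiB) invalidated
 */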
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	struct arm_smmu_cmdq_ent cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	for (i = 0; i < master->num_sids; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		cmd.atc.sid = master->sids[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	return arm_smmu_cmdq_issue_sync(master->smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 				   int ssid, unsigned long iova, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	struct arm_smmu_cmdq_ent cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	struct arm_smmu_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	struct arm_smmu_cmdq_batch cmds = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	 * Ensure that we've completed prior invalidation of the main TLBs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	 * before we read 'nr_ats_masters' in case of a concurrent call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	 * arm_smmu_enable_ats():
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	 *	// unmap()			// arm_smmu_enable_ats()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	 *	TLBI+SYNC			atomic_inc(&nr_ats_masters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	 *	smp_mb();			[...]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	 *	atomic_read(&nr_ats_masters);	pci_enable_ats() // writel()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	 * Ensures that we always see the incremented 'nr_ats_masters' count if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	 * ATS was enabled at the PCI device before completion of the TLBI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	if (!atomic_read(&smmu_domain->nr_ats_masters))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		if (!master->ats_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		for (i = 0; i < master->num_sids; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 			cmd.atc.sid = master->sids[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 			arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) /* IO_PGTABLE API */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) static void arm_smmu_tlb_inv_context(void *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	struct arm_smmu_domain *smmu_domain = cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	struct arm_smmu_cmdq_ent cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	 * NOTE: when io-pgtable is in non-strict mode, we may get here with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	 * PTEs previously cleared by unmaps on the current CPU not yet visible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	 * to the SMMU. We are relying on the dma_wmb() implicit during cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	 * insertion to guarantee those are observed before the TLBI. Do be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	 * careful, 007.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		arm_smmu_cmdq_issue_sync(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
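
/*
 * Note: the (ssid, iova, size) = (0, 0, 0) call above maps to
 * ATC_INV_SIZE_ALL in arm_smmu_atc_inv_to_cmd(), so the whole ATC of every
 * ATS-enabled master in the domain is invalidated, matching the full TLB
 * invalidation that has just been issued.
 */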
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 				   size_t granule, bool leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 				   struct arm_smmu_domain *smmu_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	unsigned long start = iova, end = iova + size, num_pages = 0, tg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	size_t inv_range = granule;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	struct arm_smmu_cmdq_batch cmds = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	struct arm_smmu_cmdq_ent cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		.tlbi = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 			.leaf	= leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	if (!size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		/* Get the leaf page size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		tg = __ffs(smmu_domain->domain.pgsize_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		/* Convert page size of 12,14,16 (log2) to 1,2,3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		cmd.tlbi.tg = (tg - 10) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		/* Determine what level the granule is at */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		cmd.tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		num_pages = size >> tg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	while (iova < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 			 * On each iteration of the loop, the range is 5 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			 * worth of the aligned size remaining.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 			 * The range in pages is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			 * range = (num_pages & (0x1f << __ffs(num_pages)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 			unsigned long scale, num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			/* Determine the power-of-2 chunk size, in pages (scale = log2) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 			scale = __ffs(num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 			cmd.tlbi.scale = scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 			/* Determine how many chunks of 2^scale size we have */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 			cmd.tlbi.num = num - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			/* range is num * 2^scale * pgsize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			inv_range = num << (scale + tg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 			/* Clear out the lower order bits for the next iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			num_pages -= num << scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		cmd.tlbi.addr = iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		iova += inv_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	arm_smmu_cmdq_batch_submit(smmu, &cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	 * Unfortunately, this can't be leaf-only since we may have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	 * zapped an entire table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	arm_smmu_atc_inv_domain(smmu_domain, 0, start, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
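
/*
 * Worked example for the range-invalidation path above (values assumed for
 * illustration): with a 4KB leaf page size, tg = 12 so cmd.tlbi.tg = 1, and
 * for a 4KB granule cmd.tlbi.ttl = 4 - (12 - 3) / (12 - 3) = 3. Invalidating
 * size = 0x6000 gives num_pages = 6; the first loop iteration picks
 * scale = __ffs(6) = 1 and num = 3, so a single command covers
 * 3 << (1 + 12) = 0x6000 bytes and the loop terminates.
 */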
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 					 unsigned long iova, size_t granule,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 					 void *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	struct arm_smmu_domain *smmu_domain = cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	struct iommu_domain *domain = &smmu_domain->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 				  size_t granule, void *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) static const struct iommu_flush_ops arm_smmu_flush_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) /* IOMMU API */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) static bool arm_smmu_capable(enum iommu_cap cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	switch (cap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	case IOMMU_CAP_CACHE_COHERENCY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	case IOMMU_CAP_NOEXEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	struct arm_smmu_domain *smmu_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	if (type != IOMMU_DOMAIN_UNMANAGED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	    type != IOMMU_DOMAIN_DMA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	    type != IOMMU_DOMAIN_IDENTITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	 * Allocate the domain and initialise some of its data structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	 * We can't really do anything meaningful until we've added a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	 * master.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	if (!smmu_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	if (type == IOMMU_DOMAIN_DMA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	    iommu_get_dma_cookie(&smmu_domain->domain)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		kfree(smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	mutex_init(&smmu_domain->init_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	INIT_LIST_HEAD(&smmu_domain->devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	spin_lock_init(&smmu_domain->devices_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	return &smmu_domain->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	int idx, size = 1 << span;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		idx = find_first_zero_bit(map, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		if (idx == size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 			return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	} while (test_and_set_bit(idx, map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
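
/*
 * Illustrative note: the allocator above scans for the first clear bit and
 * claims it with test_and_set_bit(); if another CPU wins the race for that
 * bit, the loop simply rescans, so no lock is required. It is used below by
 * arm_smmu_domain_finalise_s2() to hand out VMIDs:
 *
 *	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
 */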
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) static void arm_smmu_bitmap_free(unsigned long *map, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	clear_bit(idx, map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) static void arm_smmu_domain_free(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	iommu_put_dma_cookie(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	/* Free the CD and ASID, if we allocated them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		/* Prevent SVA from touching the CD while we're freeing it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		mutex_lock(&arm_smmu_asid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		if (cfg->cdcfg.cdtab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			arm_smmu_free_cd_tables(smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		arm_smmu_free_asid(&cfg->cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		mutex_unlock(&arm_smmu_asid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		if (cfg->vmid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	kfree(smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 				       struct arm_smmu_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 				       struct io_pgtable_cfg *pgtbl_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	u32 asid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	refcount_set(&cfg->cd.refs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	/* Prevent SVA from modifying the ASID until it is written to the CD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	mutex_lock(&arm_smmu_asid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	ret = xa_alloc(&arm_smmu_asid_xa, &asid, &cfg->cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	cfg->s1cdmax = master->ssid_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	ret = arm_smmu_alloc_cd_tables(smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		goto out_free_asid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	cfg->cd.asid	= (u16)asid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	cfg->cd.tcr	= FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			  FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 			  FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 			  FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 			  FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 			  FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 			  CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	 * Note that this will end up calling arm_smmu_sync_cd() before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	 * the master has been added to the devices list for this domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	 * This isn't an issue because the STE hasn't been installed yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		goto out_free_cd_tables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	mutex_unlock(&arm_smmu_asid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) out_free_cd_tables:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	arm_smmu_free_cd_tables(smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) out_free_asid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	arm_smmu_free_asid(&cfg->cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	mutex_unlock(&arm_smmu_asid_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
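
/*
 * Note: the TCR and MAIR values above are taken directly from the io-pgtable
 * stage-1 configuration; CTXDESC_CD_0_TCR_EPD1 is set so that table walks
 * via TTB1, which this driver never programs, are disabled.
 */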
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 				       struct arm_smmu_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 				       struct io_pgtable_cfg *pgtbl_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	int vmid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	if (vmid < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		return vmid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	cfg->vmid	= (u16)vmid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	cfg->vtcr	= FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 			  FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) static int arm_smmu_domain_finalise(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 				    struct arm_smmu_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	unsigned long ias, oas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	enum io_pgtable_fmt fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	struct io_pgtable_cfg pgtbl_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	struct io_pgtable_ops *pgtbl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	int (*finalise_stage_fn)(struct arm_smmu_domain *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 				 struct arm_smmu_master *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 				 struct io_pgtable_cfg *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	struct arm_smmu_device *smmu = smmu_domain->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	/* Restrict the stage to what we can actually support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	switch (smmu_domain->stage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	case ARM_SMMU_DOMAIN_S1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		ias = min_t(unsigned long, ias, VA_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		oas = smmu->ias;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		fmt = ARM_64_LPAE_S1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		finalise_stage_fn = arm_smmu_domain_finalise_s1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	case ARM_SMMU_DOMAIN_NESTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	case ARM_SMMU_DOMAIN_S2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		ias = smmu->ias;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		oas = smmu->oas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		fmt = ARM_64_LPAE_S2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		finalise_stage_fn = arm_smmu_domain_finalise_s2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	pgtbl_cfg = (struct io_pgtable_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		.pgsize_bitmap	= smmu->pgsize_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		.ias		= ias,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		.oas		= oas,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		.tlb		= &arm_smmu_flush_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		.iommu_dev	= smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	if (smmu_domain->non_strict)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	if (!pgtbl_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	domain->geometry.force_aperture = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		free_io_pgtable_ops(pgtbl_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	smmu_domain->pgtbl_ops = pgtbl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	__le64 *step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		struct arm_smmu_strtab_l1_desc *l1_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		/* Two-level walk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		l1_desc = &cfg->l1_desc[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		step = &l1_desc->l2ptr[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		/* Simple linear lookup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	return step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
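
/*
 * Worked example (assuming STRTAB_SPLIT == 8 and STRTAB_L1_DESC_DWORDS == 1):
 * for sid 0x1234, the two-level walk selects L1 descriptor 0x12 (sid >> 8)
 * and then STE 0x34 (sid & 0xff) within that descriptor's L2 table; the
 * linear case simply returns STE 0x1234 of the single flat table.
 */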
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	struct arm_smmu_device *smmu = master->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	for (i = 0; i < master->num_sids; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		u32 sid = master->sids[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		/* Bridged PCI devices may end up with duplicated IDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 			if (master->sids[j] == sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		if (j < i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		arm_smmu_write_strtab_ent(master, sid, step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	struct device *dev = master->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	struct arm_smmu_device *smmu = master->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	if (!(smmu->features & ARM_SMMU_FEAT_ATS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
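
/*
 * Note: all three checks above must hold before ATS is used: the SMMU must
 * implement ATS, firmware must have marked the PCI root complex as
 * ATS-capable (IOMMU_FWSPEC_PCI_RC_ATS), and the endpoint must be a PCI
 * device that advertises an ATS capability.
 */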
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) static void arm_smmu_enable_ats(struct arm_smmu_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	size_t stu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	struct arm_smmu_device *smmu = master->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	struct arm_smmu_domain *smmu_domain = master->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	/* Don't enable ATS at the endpoint if it's not enabled in the STE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	if (!master->ats_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	/* Smallest Translation Unit: log2 of the smallest supported granule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	stu = __ffs(smmu->pgsize_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	pdev = to_pci_dev(master->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	atomic_inc(&smmu_domain->nr_ats_masters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	if (pci_enable_ats(pdev, stu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) static void arm_smmu_disable_ats(struct arm_smmu_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	struct arm_smmu_domain *smmu_domain = master->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	if (!master->ats_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	pci_disable_ats(to_pci_dev(master->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	 * Ensure ATS is disabled at the endpoint before we issue the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	 * ATC invalidation via the SMMU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	arm_smmu_atc_inv_master(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	atomic_dec(&smmu_domain->nr_ats_masters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) static int arm_smmu_enable_pasid(struct arm_smmu_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	int features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	int num_pasids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	if (!dev_is_pci(master->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	pdev = to_pci_dev(master->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	features = pci_pasid_features(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	if (features < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		return features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	num_pasids = pci_max_pasids(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	if (num_pasids <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		return num_pasids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	ret = pci_enable_pasid(pdev, features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		dev_err(&pdev->dev, "Failed to enable PASID\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	master->ssid_bits = min_t(u8, ilog2(num_pasids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 				  master->smmu->ssid_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
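
/*
 * Worked example (values assumed): if pci_max_pasids() reports 0x10000
 * PASIDs, ilog2(0x10000) = 16, so the master is limited to
 * min(16, smmu->ssid_bits) substream ID bits.
 */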
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	if (!dev_is_pci(master->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	pdev = to_pci_dev(master->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	if (!pdev->pasid_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	master->ssid_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	pci_disable_pasid(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) static void arm_smmu_detach_dev(struct arm_smmu_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	struct arm_smmu_domain *smmu_domain = master->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	if (!smmu_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	arm_smmu_disable_ats(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	list_del(&master->domain_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	master->domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	master->ats_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	arm_smmu_install_ste_for_dev(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	struct arm_smmu_device *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	struct arm_smmu_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	if (!fwspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	master = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	smmu = master->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	 * Checking that SVA is disabled ensures that this device isn't bound to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	 * any mm, and can be safely detached from its old domain. Bonds cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	 * be removed concurrently since we're holding the group mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	if (arm_smmu_master_sva_enabled(master)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		dev_err(dev, "cannot attach - SVA enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	arm_smmu_detach_dev(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	mutex_lock(&smmu_domain->init_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	if (!smmu_domain->smmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		smmu_domain->smmu = smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		ret = arm_smmu_domain_finalise(domain, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 			smmu_domain->smmu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	} else if (smmu_domain->smmu != smmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 			"cannot attach to SMMU %s (upstream of %s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 			dev_name(smmu_domain->smmu->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 			dev_name(smmu->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		   master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 			"cannot attach to incompatible domain (%u SSID bits != %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 			smmu_domain->s1_cfg.s1cdmax, master->ssid_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	master->domain = smmu_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		master->ats_enabled = arm_smmu_ats_supported(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	arm_smmu_install_ste_for_dev(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	list_add(&master->domain_head, &smmu_domain->devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	arm_smmu_enable_ats(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	mutex_unlock(&smmu_domain->init_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	return ops->map(ops, iova, paddr, size, prot, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 			     size_t size, struct iommu_iotlb_gather *gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	return ops->unmap(ops, iova, size, gather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	if (smmu_domain->smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		arm_smmu_tlb_inv_context(smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 				struct iommu_iotlb_gather *gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 			       gather->pgsize, true, smmu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) static phys_addr_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	if (domain->type == IOMMU_DOMAIN_IDENTITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		return iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	return ops->iova_to_phys(ops, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) static struct platform_driver arm_smmu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 							  fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	return dev ? dev_get_drvdata(dev) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 
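/*
 * A StreamID is only usable if it indexes into the stream table; with a
 * 2-level table, each L1 entry covers 1 << STRTAB_SPLIT stream IDs.
 */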
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	unsigned long limit = smmu->strtab_cfg.num_l1_ents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		limit *= 1UL << STRTAB_SPLIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	return sid < limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) static struct iommu_ops arm_smmu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) static struct iommu_device *arm_smmu_probe_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	struct arm_smmu_device *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	struct arm_smmu_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	if (!fwspec || fwspec->ops != &arm_smmu_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		return ERR_PTR(-EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	if (!smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	master = kzalloc(sizeof(*master), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	if (!master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	master->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	master->smmu = smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	master->sids = fwspec->ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	master->num_sids = fwspec->num_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	INIT_LIST_HEAD(&master->bonds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	dev_iommu_priv_set(dev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	/* Check the SIDs are in range of the SMMU and our stream table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	for (i = 0; i < master->num_sids; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		u32 sid = master->sids[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		if (!arm_smmu_sid_in_range(smmu, sid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 			ret = -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 			goto err_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		/* Ensure l2 strtab is initialised */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 			ret = arm_smmu_init_l2_strtab(smmu, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 				goto err_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	 * Note that PASID must be enabled before, and disabled after ATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	 * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	 *   Behavior is undefined if this bit is Set and the value of the PASID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	 *   Enable, Execute Requested Enable, or Privileged Mode Requested bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	 *   are changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	arm_smmu_enable_pasid(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 
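	/*
	 * Without 2-level CD table support, clamp the SSID width to
	 * CTXDESC_LINEAR_CDMAX bits so the linear context-descriptor table
	 * stays small.
	 */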
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		master->ssid_bits = min_t(u8, master->ssid_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 					  CTXDESC_LINEAR_CDMAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	return &smmu->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) err_free_master:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	kfree(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	dev_iommu_priv_set(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) static void arm_smmu_release_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	struct arm_smmu_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	if (!fwspec || fwspec->ops != &arm_smmu_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	master = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	WARN_ON(arm_smmu_master_sva_enabled(master));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	arm_smmu_detach_dev(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	arm_smmu_disable_pasid(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	kfree(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	iommu_fwspec_free(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) static struct iommu_group *arm_smmu_device_group(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	 * We don't support devices sharing stream IDs other than PCI RID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	 * aliases, since the necessary ID-to-device lookup becomes rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	 * impractical given a potential sparse 32-bit stream ID space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	if (dev_is_pci(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 		group = pci_device_group(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 		group = generic_device_group(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 				    enum iommu_attr attr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	switch (domain->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	case IOMMU_DOMAIN_UNMANAGED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		switch (attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		case DOMAIN_ATTR_NESTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	case IOMMU_DOMAIN_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		switch (attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 			*(int *)data = smmu_domain->non_strict;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 
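/*
 * The translation stage can only be chosen while the domain is not yet
 * attached to an SMMU: once smmu_domain->smmu is set, DOMAIN_ATTR_NESTING
 * requests fail with -EPERM.
 */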
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 				    enum iommu_attr attr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	mutex_lock(&smmu_domain->init_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	switch (domain->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	case IOMMU_DOMAIN_UNMANAGED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		switch (attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		case DOMAIN_ATTR_NESTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 			if (smmu_domain->smmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 				ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 				goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			if (*(int *)data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 			ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	case IOMMU_DOMAIN_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 		switch (attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 			smmu_domain->non_strict = *(int *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 			ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	mutex_unlock(&smmu_domain->init_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	return iommu_fwspec_add_ids(dev, args->args, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 
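/*
 * Reserve the software MSI window (MSI_IOVA_BASE + MSI_IOVA_LENGTH) so the
 * IOVA allocator never hands it out for regular DMA mappings, then let the
 * DMA-IOMMU core append any further reserved regions it knows about.
 */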
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) static void arm_smmu_get_resv_regions(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 				      struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	struct iommu_resv_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 					 prot, IOMMU_RESV_SW_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	if (!region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	list_add_tail(&region->list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	iommu_dma_get_resv_regions(dev, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) static bool arm_smmu_dev_has_feature(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 				     enum iommu_dev_features feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	if (!master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	switch (feat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	case IOMMU_DEV_FEAT_SVA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		return arm_smmu_master_sva_supported(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) static bool arm_smmu_dev_feature_enabled(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 					 enum iommu_dev_features feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	if (!master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	switch (feat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	case IOMMU_DEV_FEAT_SVA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 		return arm_smmu_master_sva_enabled(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) static int arm_smmu_dev_enable_feature(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 				       enum iommu_dev_features feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	if (!arm_smmu_dev_has_feature(dev, feat))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	if (arm_smmu_dev_feature_enabled(dev, feat))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	switch (feat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	case IOMMU_DEV_FEAT_SVA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		return arm_smmu_master_enable_sva(dev_iommu_priv_get(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) static int arm_smmu_dev_disable_feature(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 					enum iommu_dev_features feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	if (!arm_smmu_dev_feature_enabled(dev, feat))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	switch (feat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	case IOMMU_DEV_FEAT_SVA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		return arm_smmu_master_disable_sva(dev_iommu_priv_get(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) static struct iommu_ops arm_smmu_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	.capable		= arm_smmu_capable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	.domain_alloc		= arm_smmu_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	.domain_free		= arm_smmu_domain_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	.attach_dev		= arm_smmu_attach_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	.map			= arm_smmu_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	.unmap			= arm_smmu_unmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	.iotlb_sync		= arm_smmu_iotlb_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	.iova_to_phys		= arm_smmu_iova_to_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	.probe_device		= arm_smmu_probe_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	.release_device		= arm_smmu_release_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	.device_group		= arm_smmu_device_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	.domain_get_attr	= arm_smmu_domain_get_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	.domain_set_attr	= arm_smmu_domain_set_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	.of_xlate		= arm_smmu_of_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	.get_resv_regions	= arm_smmu_get_resv_regions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	.put_resv_regions	= generic_iommu_put_resv_regions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	.dev_has_feat		= arm_smmu_dev_has_feature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	.dev_feat_enabled	= arm_smmu_dev_feature_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	.dev_enable_feat	= arm_smmu_dev_enable_feature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 	.dev_disable_feat	= arm_smmu_dev_disable_feature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) /* Probing and initialisation functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 				   struct arm_smmu_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 				   unsigned long prod_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 				   unsigned long cons_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 				   size_t dwords, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	size_t qsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 
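	/*
	 * Start from the queue size the hardware advertised and keep halving
	 * it until the DMA allocation succeeds or we are down to a single
	 * page.
	 */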
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		qsz = ((1 << q->llq.max_n_shift) * dwords) << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 		q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 		if (q->base || qsz < PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 		q->llq.max_n_shift--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	} while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	if (!q->base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		dev_err(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 			"failed to allocate queue (0x%zx bytes) for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 			qsz, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	if (!WARN_ON(q->base_dma & (qsz - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		dev_info(smmu->dev, "allocated %u entries for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 			 1 << q->llq.max_n_shift, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	q->prod_reg	= arm_smmu_page1_fixup(prod_off, smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	q->cons_reg	= arm_smmu_page1_fixup(cons_off, smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	q->ent_dwords	= dwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	q->q_base  = Q_BASE_RWA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	q->llq.prod = q->llq.cons = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) static void arm_smmu_cmdq_free_bitmap(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	unsigned long *bitmap = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	bitmap_free(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 
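/*
 * The command queue keeps a bitmap with one bit per queue slot, used by
 * the lock-free insertion path to track which entries have been fully
 * written out by their producers.
 */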
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	atomic_long_t *bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	atomic_set(&cmdq->owner_prod, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	atomic_set(&cmdq->lock, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	if (!bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 		dev_err(smmu->dev, "failed to allocate cmdq bitmap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		cmdq->valid_map = bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 		devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	/* cmdq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 				      "cmdq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	ret = arm_smmu_cmdq_init(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	/* evtq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 				      "evtq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	/* priq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 				       "priq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 
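/*
 * Allocate the array of L1 descriptors and write out an initial (empty)
 * descriptor for each entry; the L2 tables themselves are only allocated
 * lazily, when a StreamID in the corresponding range is first probed.
 */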
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	void *strtab = smmu->strtab_cfg.strtab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	if (!cfg->l1_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	for (i = 0; i < cfg->num_l1_ents; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		strtab += STRTAB_L1_DESC_DWORDS << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	void *strtab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	u32 size, l1size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	/* Calculate the L1 size, capped to the SIDSIZE. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	cfg->num_l1_ents = 1 << size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	size += STRTAB_SPLIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	if (size < smmu->sid_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 		dev_warn(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 			 "2-level strtab only covers %u/%u bits of SID\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 			 size, smmu->sid_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 				     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	if (!strtab) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 		dev_err(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 			"failed to allocate l1 stream table (%u bytes)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 			l1size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	cfg->strtab = strtab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	/* Configure strtab_base_cfg for 2 levels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	reg  = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_2LVL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	cfg->strtab_base_cfg = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	return arm_smmu_init_l1_strtab(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 
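/*
 * Linear stream table: one STE for every possible StreamID, all of them
 * pre-initialised by arm_smmu_init_bypass_stes() so that the whole table
 * is valid before the SMMU is enabled.
 */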
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	void *strtab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	u32 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 				     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	if (!strtab) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		dev_err(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 			"failed to allocate linear stream table (%u bytes)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 			size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	cfg->strtab = strtab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	cfg->num_l1_ents = 1 << smmu->sid_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	/* Configure strtab_base_cfg for a linear table covering all SIDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	reg  = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_LINEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	cfg->strtab_base_cfg = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		ret = arm_smmu_init_strtab_2lvl(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		ret = arm_smmu_init_strtab_linear(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	/* Set the strtab base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	reg  = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	reg |= STRTAB_BASE_RA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	smmu->strtab_cfg.strtab_base = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	/* Allocate the first VMID for stage-2 bypass STEs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	set_bit(0, smmu->vmid_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	ret = arm_smmu_init_queues(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	return arm_smmu_init_strtab(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
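/*
 * Write a control register and poll the companion ACK register until the
 * hardware reflects the new value, or the poll times out.
 */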
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 				   unsigned int reg_off, unsigned int ack_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	writel_relaxed(val, smmu->base + reg_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 					  1, ARM_SMMU_POLL_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) /* GBPA is "special": it is updated via the self-clearing GBPA_UPDATE bit rather than a separate ACK register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 					 1, ARM_SMMU_POLL_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	reg &= ~clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	reg |= set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	writel_relaxed(reg | GBPA_UPDATE, gbpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 					 1, ARM_SMMU_POLL_TIMEOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 		dev_err(smmu->dev, "GBPA not responding to update\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) static void arm_smmu_free_msis(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	struct device *dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	platform_msi_domain_free_irqs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 
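/*
 * MSI write-msg callback: rather than targeting an external doorbell, the
 * composed address, data and memory attributes are written to the SMMU's
 * own IRQ_CFG0/1/2 registers for the queue selected by the MSI index.
 */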
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	phys_addr_t doorbell;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	struct device *dev = msi_desc_to_dev(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	doorbell &= MSI_CFG0_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	writeq_relaxed(doorbell, smmu->base + cfg[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	writel_relaxed(msg->data, smmu->base + cfg[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	struct msi_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 	int ret, nvec = ARM_SMMU_MAX_MSIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 	struct device *dev = smmu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	/* Clear the MSI address regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	if (smmu->features & ARM_SMMU_FEAT_PRI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 		nvec--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	if (!dev->msi_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 		dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	for_each_msi_entry(desc, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 		switch (desc->platform.msi_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 		case EVTQ_MSI_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 			smmu->evtq.q.irq = desc->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		case GERROR_MSI_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 			smmu->gerr_irq = desc->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 		case PRIQ_MSI_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 			smmu->priq.q.irq = desc->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 		default:	/* Unknown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 	/* Add callback to free MSIs on teardown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	devm_add_action(dev, arm_smmu_free_msis, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 
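/*
 * Request the per-queue interrupt lines. arm_smmu_setup_msis() may already
 * have replaced the wired IRQ numbers with MSIs; either way, a missing or
 * failing line is reported with a warning rather than treated as fatal.
 */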
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 	int irq, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	arm_smmu_setup_msis(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	/* Request interrupt lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	irq = smmu->evtq.q.irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	if (irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 						arm_smmu_evtq_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 						IRQF_ONESHOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 						"arm-smmu-v3-evtq", smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 			dev_warn(smmu->dev, "failed to enable evtq irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 		dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	irq = smmu->gerr_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	if (irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 				       0, "arm-smmu-v3-gerror", smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 			dev_warn(smmu->dev, "failed to enable gerror irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 		dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	if (smmu->features & ARM_SMMU_FEAT_PRI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 		irq = smmu->priq.q.irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 		if (irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 							arm_smmu_priq_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 							IRQF_ONESHOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 							"arm-smmu-v3-priq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 							smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 				dev_warn(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 					 "failed to enable priq irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 			dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 	int ret, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	/* Disable IRQs first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 				      ARM_SMMU_IRQ_CTRLACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 		dev_err(smmu->dev, "failed to disable irqs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	irq = smmu->combined_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 	if (irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 		 * Cavium ThunderX2 implementation doesn't support unique irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 		 * lines. Use a single irq line for all the SMMUv3 interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 		ret = devm_request_threaded_irq(smmu->dev, irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 					arm_smmu_combined_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 					arm_smmu_combined_irq_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 					IRQF_ONESHOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 					"arm-smmu-v3-combined-irq", smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 			dev_warn(smmu->dev, "failed to enable combined irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 		arm_smmu_setup_unique_irqs(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	if (smmu->features & ARM_SMMU_FEAT_PRI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 		irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	/* Enable interrupt generation on the SMMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 		dev_warn(smmu->dev, "failed to enable irqs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 
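/* Clearing CR0 (and waiting for CR0ACK) disables the SMMU and all of its queues. */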
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 		dev_err(smmu->dev, "failed to clear cr0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	u32 reg, enables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	struct arm_smmu_cmdq_ent cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	/* Clear CR0 and sync (disables SMMU and queue processing) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	if (reg & CR0_SMMUEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 		WARN_ON(is_kdump_kernel() && !disable_bypass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 	ret = arm_smmu_device_disable(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	/* CR1 (table and queue memory attributes) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 	reg = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	      FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 	      FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 	      FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	      FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 	      FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	/* CR2 (PTM, RECINVSID and E2H) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	/* Stream table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	writeq_relaxed(smmu->strtab_cfg.strtab_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 		       smmu->base + ARM_SMMU_STRTAB_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	/* Command queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 
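	/*
	 * CR0 enable bits are accumulated in 'enables': each queue is enabled
	 * and acknowledged in turn, on top of the bits that are already set.
	 */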
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	enables = CR0_CMDQEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 				      ARM_SMMU_CR0ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 		dev_err(smmu->dev, "failed to enable command queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	/* Invalidate any cached configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 	cmd.opcode = CMDQ_OP_CFGI_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	arm_smmu_cmdq_issue_sync(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 	/* Invalidate any stale TLB entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	if (smmu->features & ARM_SMMU_FEAT_HYP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	arm_smmu_cmdq_issue_sync(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	/* Event queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	writel_relaxed(smmu->evtq.q.llq.prod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	writel_relaxed(smmu->evtq.q.llq.cons,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	enables |= CR0_EVTQEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 				      ARM_SMMU_CR0ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 		dev_err(smmu->dev, "failed to enable event queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 	/* PRI queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	if (smmu->features & ARM_SMMU_FEAT_PRI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 		writeq_relaxed(smmu->priq.q.q_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 			       smmu->base + ARM_SMMU_PRIQ_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 		writel_relaxed(smmu->priq.q.llq.prod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 		writel_relaxed(smmu->priq.q.llq.cons,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 		enables |= CR0_PRIQEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 					      ARM_SMMU_CR0ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 			dev_err(smmu->dev, "failed to enable PRI queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	if (smmu->features & ARM_SMMU_FEAT_ATS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 		enables |= CR0_ATSCHK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 					      ARM_SMMU_CR0ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 			dev_err(smmu->dev, "failed to enable ATS check\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	ret = arm_smmu_setup_irqs(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 		dev_err(smmu->dev, "failed to setup irqs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 
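	/*
	 * In a kdump kernel the event and PRI queues are left disabled; the
	 * likely intent is to avoid flooding the crash kernel with events and
	 * page requests from devices the previous kernel left running, while
	 * the SMMU itself can still be enabled below.
	 */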
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	if (is_kdump_kernel())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 		enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 	/* Enable the SMMU interface, or ensure bypass */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	if (!bypass || disable_bypass) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 		enables |= CR0_SMMUEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 				      ARM_SMMU_CR0ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 		dev_err(smmu->dev, "failed to enable SMMU interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 
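/*
 * Probe the hardware ID registers (IDR0, IDR1, IDR3 and IDR5) and record the
 * supported features, queue sizes, StreamID/SubstreamID widths, page sizes
 * and input/output address sizes in the arm_smmu_device structure.
 */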
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 	/* IDR0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 
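	/*
	 * FIELD_GET()/FIELD_PREP() from <linux/bitfield.h> extract and build
	 * register fields using the GENMASK()-style definitions in
	 * arm-smmu-v3.h, e.g. FIELD_GET(IDR0_ST_LVL, reg) returns the ST_LVL
	 * field shifted down to bit 0.
	 */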
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 	/* 2-level structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 	if (FIELD_GET(IDR0_ST_LVL, reg) == IDR0_ST_LVL_2LVL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	if (reg & IDR0_CD2L)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	 * Translation table endianness.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 	 * We currently require the same endianness as the CPU, but this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	switch (FIELD_GET(IDR0_TTENDIAN, reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	case IDR0_TTENDIAN_MIXED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	case IDR0_TTENDIAN_BE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 		smmu->features |= ARM_SMMU_FEAT_TT_BE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	case IDR0_TTENDIAN_LE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		smmu->features |= ARM_SMMU_FEAT_TT_LE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	/* Boolean feature flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 		smmu->features |= ARM_SMMU_FEAT_PRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 		smmu->features |= ARM_SMMU_FEAT_ATS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 	if (reg & IDR0_SEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		smmu->features |= ARM_SMMU_FEAT_SEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 	if (reg & IDR0_MSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 		smmu->features |= ARM_SMMU_FEAT_MSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 		if (coherent && !disable_msipolling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 			smmu->options |= ARM_SMMU_OPT_MSIPOLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 	if (reg & IDR0_HYP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 		smmu->features |= ARM_SMMU_FEAT_HYP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 	 * The coherency feature as set by FW is used in preference to the ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 	 * register, but warn on mismatch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	if (!!(reg & IDR0_COHACC) != coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 		dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 			 coherent ? "true" : "false");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 	case IDR0_STALL_MODEL_FORCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	case IDR0_STALL_MODEL_STALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 		smmu->features |= ARM_SMMU_FEAT_STALLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	if (reg & IDR0_S1P)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	if (reg & IDR0_S2P)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 		dev_err(smmu->dev, "no translation support!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	/* We only support the AArch64 table format at present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	switch (FIELD_GET(IDR0_TTF, reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	case IDR0_TTF_AARCH32_64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 		smmu->ias = 40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	case IDR0_TTF_AARCH64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		dev_err(smmu->dev, "AArch64 table format not supported!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 	/* ASID/VMID sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	/* IDR1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 		dev_err(smmu->dev, "embedded implementation not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	/* Queue sizes, capped to ensure natural alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 					     FIELD_GET(IDR1_CMDQS, reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 		 * We don't support splitting up batches, so one batch of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 		 * commands plus an extra sync needs to fit inside the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 		 * queue. There's also no way we can handle the weird alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 		 * restrictions on the base pointer for a unit-length queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 		dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 			CMDQ_BATCH_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 					     FIELD_GET(IDR1_EVTQS, reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 	smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 					     FIELD_GET(IDR1_PRIQS, reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	/* SID/SSID sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 	smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 	smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	 * If the SMMU supports fewer bits than would fill a single L2 stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	 * table, use a linear table instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	if (smmu->sid_bits <= STRTAB_SPLIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 	/* IDR3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	if (FIELD_GET(IDR3_RIL, reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 		smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 	/* IDR5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	/* Maximum number of outstanding stalls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 
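	/*
	 * Each supported translation granule contributes its page size plus
	 * the block sizes it can map: 64K -> 64K/512M, 16K -> 16K/32M and
	 * 4K -> 4K/2M/1G.
	 */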
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 	/* Page sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 	if (reg & IDR5_GRAN64K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	if (reg & IDR5_GRAN16K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	if (reg & IDR5_GRAN4K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	/* Input address size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	if (FIELD_GET(IDR5_VAX, reg) == IDR5_VAX_52_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 		smmu->features |= ARM_SMMU_FEAT_VAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 	/* Output address size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 	switch (FIELD_GET(IDR5_OAS, reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 	case IDR5_OAS_32_BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 		smmu->oas = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 	case IDR5_OAS_36_BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		smmu->oas = 36;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 	case IDR5_OAS_40_BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 		smmu->oas = 40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	case IDR5_OAS_42_BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 		smmu->oas = 42;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	case IDR5_OAS_44_BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 		smmu->oas = 44;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	case IDR5_OAS_52_BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 		smmu->oas = 52;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 		smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 		dev_info(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 			"unknown output address size. Truncating to 48-bit\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 	case IDR5_OAS_48_BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 		smmu->oas = 48;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 	if (arm_smmu_ops.pgsize_bitmap == -1UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 	/* Set the DMA mask for our table walker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 		dev_warn(smmu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 			 "failed to set DMA mask for table walker\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 	smmu->ias = max(smmu->ias, smmu->oas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	if (arm_smmu_sva_supported(smmu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 		smmu->features |= ARM_SMMU_FEAT_SVA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 		 smmu->ias, smmu->oas, smmu->features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 
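/*
 * ACPI/IORT probing: implementation quirks are keyed off the IORT SMMUv3
 * node's model field (Cavium CN99xx exposes only page 0 registers, HiSilicon
 * Hi161x needs the prefetch command to be skipped), and firmware-described
 * coherency takes precedence over the ID register.
 */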
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 	switch (model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 		smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 		smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 				      struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	struct acpi_iort_smmu_v3 *iort_smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 	struct device *dev = smmu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 	struct acpi_iort_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 	node = *(struct acpi_iort_node **)dev_get_platdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	/* Retrieve SMMUv3 specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	acpi_smmu_get_options(iort_smmu->model, smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 					     struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) static int arm_smmu_device_dt_probe(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 				    struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 	u32 cells;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 		dev_err(dev, "missing #iommu-cells property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	else if (cells != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 	parse_driver_options(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 	if (of_dma_is_coherent(dev->of_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 
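/*
 * The architected register file spans two 64K pages; implementations with the
 * PAGE0_REGS_ONLY quirk (e.g. Cavium CN99xx) alias the page 1 registers into
 * page 0, so only 64K of MMIO space is claimed for them and smmu->page1 ends
 * up pointing back at smmu->base.
 */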
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 	if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 		return SZ_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		return SZ_128K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 
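/*
 * Attach (or, with ops == NULL, detach) these IOMMU ops to every bus type
 * whose devices may master through the SMMU: PCI, AMBA and the platform bus.
 * Failures unwind any bus types already set up.
 */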
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	if (pci_bus_type.iommu_ops != ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 		err = bus_set_iommu(&pci_bus_type, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) #ifdef CONFIG_ARM_AMBA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	if (amba_bustype.iommu_ops != ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 		err = bus_set_iommu(&amba_bustype, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 			goto err_reset_pci_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 	if (platform_bus_type.iommu_ops != ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 		err = bus_set_iommu(&platform_bus_type, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 			goto err_reset_amba_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) err_reset_amba_ops:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) #ifdef CONFIG_ARM_AMBA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	bus_set_iommu(&amba_bustype, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) err_reset_pci_ops: __maybe_unused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 	bus_set_iommu(&pci_bus_type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 
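/*
 * Map an exact sub-region of the MMIO window rather than the whole platform
 * resource; see the comment in arm_smmu_device_probe() about leaving the
 * IMPLEMENTATION DEFINED (PMCG) registers untouched.
 */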
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 				      resource_size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 	struct resource res = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 		.flags = IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 		.start = start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 		.end = start + size - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	return devm_ioremap_resource(dev, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 
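/*
 * Top-level probe: parse the firmware description (DT or ACPI/IORT), map the
 * register pages, collect any wired interrupts, probe the ID registers,
 * allocate the in-memory tables and queues, reset the device and finally
 * register with the IOMMU core and the bus types.
 */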
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) static int arm_smmu_device_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	int irq, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	resource_size_t ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 	struct arm_smmu_device *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	bool bypass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 	if (!smmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 		dev_err(dev, "failed to allocate arm_smmu_device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	smmu->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 	if (dev->of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 		ret = arm_smmu_device_dt_probe(pdev, smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 		ret = arm_smmu_device_acpi_probe(pdev, smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 		if (ret == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	/* Set bypass mode according to firmware probing result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	bypass = !!ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	/* Base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 	if (resource_size(res) < arm_smmu_resource_size(smmu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 		dev_err(dev, "MMIO region too small (%pr)\n", res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 	ioaddr = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 	 * Don't map the IMPLEMENTATION DEFINED regions, since they may contain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	 * the PMCG registers which are reserved by the PMU driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 	smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 	if (IS_ERR(smmu->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 		return PTR_ERR(smmu->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	if (arm_smmu_resource_size(smmu) > SZ_64K) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 		smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 					       ARM_SMMU_REG_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 		if (IS_ERR(smmu->page1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 			return PTR_ERR(smmu->page1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 		smmu->page1 = smmu->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	/* Interrupt lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	irq = platform_get_irq_byname_optional(pdev, "combined");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	if (irq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 		smmu->combined_irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 		irq = platform_get_irq_byname_optional(pdev, "eventq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 		if (irq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 			smmu->evtq.q.irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 		irq = platform_get_irq_byname_optional(pdev, "priq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 		if (irq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 			smmu->priq.q.irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 		irq = platform_get_irq_byname_optional(pdev, "gerror");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 		if (irq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 			smmu->gerr_irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 	/* Probe the h/w */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 	ret = arm_smmu_device_hw_probe(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	/* Initialise in-memory data structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	ret = arm_smmu_init_structures(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 	/* Record our private device structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	platform_set_drvdata(pdev, smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	/* Reset the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	ret = arm_smmu_device_reset(smmu, bypass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	/* And we're up. Go go go! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 				     "smmu3.%pa", &ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	ret = iommu_device_register(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 		dev_err(dev, "Failed to register iommu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	return arm_smmu_set_bus_ops(&arm_smmu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) static int arm_smmu_device_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 	arm_smmu_set_bus_ops(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	iommu_device_unregister(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	iommu_device_sysfs_remove(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 	arm_smmu_device_disable(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 
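/*
 * .shutdown reuses the remove path so that the SMMU is disabled before the
 * kernel hands over control, e.g. across kexec; presumably this keeps the
 * next kernel from inheriting live translations.
 */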
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) static void arm_smmu_device_shutdown(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	arm_smmu_device_remove(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 
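/*
 * A minimal, purely illustrative devicetree node that this table would match;
 * the unit address, register size and interrupt numbers below are
 * hypothetical and taken from no particular board:
 *
 *	smmu: iommu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>;
 *		interrupt-names = "eventq", "priq", "gerror";
 *		#iommu-cells = <1>;
 *		dma-coherent;
 *	};
 */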
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) static const struct of_device_id arm_smmu_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 	{ .compatible = "arm,smmu-v3", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 	{ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) static struct platform_driver arm_smmu_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 	.driver	= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 		.name			= "arm-smmu-v3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 		.of_match_table		= arm_smmu_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 		.suppress_bind_attrs	= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	.probe	= arm_smmu_device_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 	.remove	= arm_smmu_device_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	.shutdown = arm_smmu_device_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) module_platform_driver(arm_smmu_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) MODULE_AUTHOR("Will Deacon <will@kernel.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) MODULE_ALIAS("platform:arm-smmu-v3");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) MODULE_LICENSE("GPL v2");