Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (C) 2016, Semihalf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *	Author: Tomasz Nowicki <tn@semihalf.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * This file implements early detection/parsing of I/O mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * reported to OS through firmware via I/O Remapping Table (IORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * IORT document number: ARM DEN 0049A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #define pr_fmt(fmt)	"ACPI: IORT: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/acpi_iort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/iommu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/dma-map-ops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #define IORT_TYPE_MASK(type)	(1 << (type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) 				(1 << ACPI_IORT_NODE_SMMU_V3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
/*
 * Association between an ITS (identified by its IORT translation ID and
 * base address) and the MSI domain token (fwnode) registered for it.
 * Entries live on iort_msi_chip_list, protected by iort_msi_chip_lock.
 */
struct iort_its_msi_chip {
	struct list_head	list;		/* entry in iort_msi_chip_list */
	struct fwnode_handle	*fw_node;	/* MSI domain token */
	phys_addr_t		base_addr;	/* ITS base address */
	u32			translation_id;	/* ITS ID from the IORT table */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 
/*
 * Mapping from an IORT table node to the fwnode_handle associated with it.
 * Entries live on iort_fwnode_list, protected by iort_fwnode_lock.
 */
struct iort_fwnode {
	struct list_head list;			/* entry in iort_fwnode_list */
	struct acpi_iort_node *iort_node;	/* IORT node used as lookup key */
	struct fwnode_handle *fwnode;		/* fwnode registered for the node */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) static LIST_HEAD(iort_fwnode_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) static DEFINE_SPINLOCK(iort_fwnode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44)  * iort_set_fwnode() - Create iort_fwnode and use it to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45)  *		       iommu data in the iort_fwnode_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47)  * @iort_node: IORT table node associated with the IOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48)  * @fwnode: fwnode associated with the IORT node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50)  * Returns: 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51)  *          <0 on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 				  struct fwnode_handle *fwnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	struct iort_fwnode *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 	if (WARN_ON(!np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 	INIT_LIST_HEAD(&np->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	np->iort_node = iort_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	np->fwnode = fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	spin_lock(&iort_fwnode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	list_add_tail(&np->list, &iort_fwnode_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	spin_unlock(&iort_fwnode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75)  * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77)  * @node: IORT table node to be looked-up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79)  * Returns: fwnode_handle pointer on success, NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) static inline struct fwnode_handle *iort_get_fwnode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 			struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	struct iort_fwnode *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	struct fwnode_handle *fwnode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	spin_lock(&iort_fwnode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	list_for_each_entry(curr, &iort_fwnode_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 		if (curr->iort_node == node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 			fwnode = curr->fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	spin_unlock(&iort_fwnode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	return fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100)  * iort_delete_fwnode() - Delete fwnode associated with an IORT node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102)  * @node: IORT table node associated with fwnode to delete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) static inline void iort_delete_fwnode(struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	struct iort_fwnode *curr, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	spin_lock(&iort_fwnode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 		if (curr->iort_node == node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 			list_del(&curr->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 			kfree(curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	spin_unlock(&iort_fwnode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120)  * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122)  * @fwnode: fwnode associated with device to be looked-up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124)  * Returns: iort_node pointer on success, NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) static inline struct acpi_iort_node *iort_get_iort_node(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 			struct fwnode_handle *fwnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	struct iort_fwnode *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	struct acpi_iort_node *iort_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	spin_lock(&iort_fwnode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	list_for_each_entry(curr, &iort_fwnode_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 		if (curr->fwnode == fwnode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 			iort_node = curr->iort_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	spin_unlock(&iort_fwnode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	return iort_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) typedef acpi_status (*iort_find_node_callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	(struct acpi_iort_node *node, void *context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) /* Root pointer to the mapped IORT table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) static struct acpi_table_header *iort_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) static LIST_HEAD(iort_msi_chip_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) static DEFINE_SPINLOCK(iort_msi_chip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154)  * iort_register_domain_token() - register domain token along with related
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155)  * ITS ID and base address to the list from where we can get it back later on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156)  * @trans_id: ITS ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157)  * @base: ITS base address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158)  * @fw_node: Domain token.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160)  * Returns: 0 on success, -ENOMEM if no memory when allocating list element
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) int iort_register_domain_token(int trans_id, phys_addr_t base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 			       struct fwnode_handle *fw_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	struct iort_its_msi_chip *its_msi_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	if (!its_msi_chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 	its_msi_chip->fw_node = fw_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	its_msi_chip->translation_id = trans_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	its_msi_chip->base_addr = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	spin_lock(&iort_msi_chip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	list_add(&its_msi_chip->list, &iort_msi_chip_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	spin_unlock(&iort_msi_chip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183)  * iort_deregister_domain_token() - Deregister domain token based on ITS ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184)  * @trans_id: ITS ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186)  * Returns: none.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) void iort_deregister_domain_token(int trans_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	struct iort_its_msi_chip *its_msi_chip, *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	spin_lock(&iort_msi_chip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 		if (its_msi_chip->translation_id == trans_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 			list_del(&its_msi_chip->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 			kfree(its_msi_chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	spin_unlock(&iort_msi_chip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204)  * iort_find_domain_token() - Find domain token based on given ITS ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205)  * @trans_id: ITS ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207)  * Returns: domain token when find on the list, NULL otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) struct fwnode_handle *iort_find_domain_token(int trans_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	struct fwnode_handle *fw_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	struct iort_its_msi_chip *its_msi_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	spin_lock(&iort_msi_chip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 		if (its_msi_chip->translation_id == trans_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 			fw_node = its_msi_chip->fw_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	spin_unlock(&iort_msi_chip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	return fw_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 
/*
 * iort_scan_node() - Linearly walk every node in the mapped IORT table.
 *
 * @type: node type (ACPI_IORT_NODE_*) a candidate must have
 * @callback: matcher invoked on each node of @type; ACPI_SUCCESS => match
 * @context: opaque argument forwarded to @callback
 *
 * Returns the first node of @type for which @callback succeeds, or NULL
 * if the table is absent, malformed, or nothing matches.
 */
static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	/* First byte past the table: the walk must stay strictly below it */
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		/* node_count and length disagree: taint and bail on bad firmware */
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		/* Nodes are variable-length; advance by this node's own length */
		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 
/*
 * iort_match_node_callback() - iort_scan_node() matcher for a struct device.
 *
 * @node: IORT node under test (named component or PCI root complex)
 * @context: the struct device being looked up
 *
 * For a named component, compares the device's ACPI full pathname with the
 * node's device_name; for a PCI root complex, compares PCI segment numbers.
 * Returns AE_OK on a match, AE_NOT_FOUND (or an acpi_get_name() error)
 * otherwise.
 */
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev;
		struct acpi_iort_named_component *ncomp;
		struct device *nc_dev = dev;

		/*
		 * Walk the device tree to find a device with an
		 * ACPI companion; there is no point in scanning
		 * IORT for a device matching a named component if
		 * the device does not have an ACPI companion to
		 * start with.
		 */
		do {
			adev = ACPI_COMPANION(nc_dev);
			if (adev)
				break;

			nc_dev = nc_dev->parent;
		} while (nc_dev);

		if (!adev)
			goto out;

		/* buf is ACPI-allocated; freed below via acpi_os_free() */
		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(nc_dev, "Can't get device full path name\n");
			goto out;
		}

		/* Match on the exact ACPI pathname stored in the IORT node */
		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		/* For PCI, @context is the root bus device */
		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers maps one-to-one
		 * with root complexes. Each segment number can represent only
		 * one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 
/*
 * iort_id_map() - Translate an input ID through one IORT ID mapping entry.
 *
 * @map: the ID mapping entry to apply
 * @type: type of the node that owns @map
 * @rid_in: input ID to translate
 * @rid_out: where to store the translated ID on success
 * @check_overlap: true when a previous mapping already matched @rid_in at
 *                 the end of its range (see the off-by-1 note below)
 *
 * Returns 0 on success, -ENXIO when the entry does not apply, and -EAGAIN
 * when the match is at the very end of the range and may be superseded by
 * an overlapping mapping (caller retries with @check_overlap set).
 */
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out, bool check_overlap)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		/* Single mappings are only legal on these node types */
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	/* Range check; note id_count is inclusive (number of IDs minus 1) */
	if (rid_in < map->input_base ||
	    (rid_in > map->input_base + map->id_count))
		return -ENXIO;

	if (check_overlap) {
		/*
		 * We already found a mapping for this input ID at the end of
		 * another region. If it coincides with the start of this
		 * region, we assume the prior match was due to the off-by-1
		 * issue mentioned below, and allow it to be superseded.
		 * Otherwise, things are *really* broken, and we just disregard
		 * duplicate matches entirely to retain compatibility.
		 */
		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
		       map, rid_in);
		if (rid_in != map->input_base)
			return -ENXIO;

		pr_err(FW_BUG "applying workaround.\n");
	}

	*rid_out = map->output_base + (rid_in - map->input_base);

	/*
	 * Due to confusion regarding the meaning of the id_count field (which
	 * carries the number of IDs *minus 1*), we may have to disregard this
	 * match if it is at the end of the range, and overlaps with the start
	 * of another one.
	 */
	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
		return -EAGAIN;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 					       u32 *id_out, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	struct acpi_iort_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	struct acpi_iort_id_mapping *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	if (!node->mapping_offset || !node->mapping_count ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 				     index >= node->mapping_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 			   node->mapping_offset + index * sizeof(*map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	/* Firmware bug! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	if (!map->output_reference) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 		       node, node->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 			       map->output_reference);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 		    node->type == ACPI_IORT_NODE_PMCG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 			*id_out = map->output_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 			return parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) static int iort_get_id_mapping_index(struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	struct acpi_iort_smmu_v3 *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	struct acpi_iort_pmcg *pmcg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	switch (node->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	case ACPI_IORT_NODE_SMMU_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 		 * SMMUv3 dev ID mapping index was introduced in revision 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 		 * table, not available in revision 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 		if (node->revision < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 		 * ID mapping index is only ignored if all interrupts are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 		 * GSIV based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 		if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 		    && smmu->sync_gsiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		if (smmu->id_mapping_index >= node->mapping_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 			       node, node->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		return smmu->id_mapping_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	case ACPI_IORT_NODE_PMCG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 		pmcg = (struct acpi_iort_pmcg *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		if (pmcg->overflow_gsiv || node->mapping_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 					       u32 id_in, u32 *id_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 					       u8 type_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	u32 id = id_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	/* Parse the ID mapping tree to find specified node type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		struct acpi_iort_id_mapping *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		int i, index, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		u32 out_ref = 0, map_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		if (IORT_TYPE_MASK(node->type) & type_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 			if (id_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 				*id_out = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 			return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		if (!node->mapping_offset || !node->mapping_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 			goto fail_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 				   node->mapping_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		/* Firmware bug! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		if (!map->output_reference) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 			       node, node->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 			goto fail_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		 * Get the special ID mapping index (if any) and skip its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		 * associated ID map to prevent erroneous multi-stage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		 * IORT ID translations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		index = iort_get_id_mapping_index(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		/* Do the ID translation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		for (i = 0; i < node->mapping_count; i++, map++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 			/* if it is special mapping index, skip it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 			if (i == index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 			if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 			if (rc == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 				out_ref = map->output_reference;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		if (i == node->mapping_count && !out_ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 			goto fail_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 				    rc ? out_ref : map->output_reference);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) fail_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	/* Map input ID to output ID unchanged on mapping failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	if (id_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		*id_out = id_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) static struct acpi_iort_node *iort_node_map_platform_id(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 		int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	struct acpi_iort_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	/* step 1: retrieve the initial dev id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	parent = iort_node_get_id(node, &id, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	if (!parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	 * optional step 2: map the initial dev id if its parent is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	 * the target type we want, map it again for the use cases such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	 * as NC (named component) -> SMMU -> ITS. If the type is matched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	 * return the initial dev id and its parent pointer directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		parent = iort_node_map_id(parent, id, id_out, type_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		if (id_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 			*id_out = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	return parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	struct pci_bus *pbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	if (!dev_is_pci(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		struct acpi_iort_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		 * scan iort_fwnode_list to see if it's an iort platform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		 * device (such as SMMU, PMCG),its iort node already cached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		 * and associated with fwnode when iort platform devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		 * were initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 		node = iort_get_iort_node(dev->fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		if (node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 			return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		 * if not, then it should be a platform device defined in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		 * DSDT/SSDT (with Named Component node in IORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 				      iort_match_node_callback, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	pbus = to_pci_dev(dev)->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 			      iort_match_node_callback, &pbus->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569)  * iort_msi_map_id() - Map a MSI input ID for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570)  * @dev: The device for which the mapping is to be done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571)  * @input_id: The device input ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573)  * Returns: mapped MSI ID on success, input ID otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) u32 iort_msi_map_id(struct device *dev, u32 input_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	struct acpi_iort_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	u32 dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	node = iort_find_dev_node(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		return input_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	return dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589)  * iort_pmsi_get_dev_id() - Get the device id for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590)  * @dev: The device for which the mapping is to be done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591)  * @dev_id: The device ID found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593)  * Returns: 0 for successful find a dev id, -ENODEV on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	int i, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	struct acpi_iort_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	node = iort_find_dev_node(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	index = iort_get_id_mapping_index(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	/* if there is a valid index, go get the dev_id directly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	if (index >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		if (iort_node_get_id(node, dev_id, index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		for (i = 0; i < node->mapping_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 			if (iort_node_map_platform_id(node, dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 						      IORT_MSI_TYPE, i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	struct iort_its_msi_chip *its_msi_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	spin_lock(&iort_msi_chip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		if (its_msi_chip->translation_id == its_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 			*base = its_msi_chip->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	spin_unlock(&iort_msi_chip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639)  * iort_dev_find_its_id() - Find the ITS identifier for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640)  * @dev: The device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641)  * @id: Device's ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642)  * @idx: Index of the ITS identifier list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643)  * @its_id: ITS identifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645)  * Returns: 0 on success, appropriate error value otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) static int iort_dev_find_its_id(struct device *dev, u32 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 				unsigned int idx, int *its_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	struct acpi_iort_its_group *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	struct acpi_iort_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	node = iort_find_dev_node(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	/* Move to ITS specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	its = (struct acpi_iort_its_group *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	if (idx >= its->its_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 			idx, its->its_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	*its_id = its->identifiers[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674)  * iort_get_device_domain() - Find MSI domain related to a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675)  * @dev: The device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676)  * @id: Requester ID for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677)  * @bus_token: irq domain bus token.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679)  * Returns: the MSI domain for this device, NULL otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 					  enum irq_domain_bus_token bus_token)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	struct fwnode_handle *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	int its_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	if (iort_dev_find_its_id(dev, id, 0, &its_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	handle = iort_find_domain_token(its_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	return irq_find_matching_fwnode(handle, bus_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) static void iort_set_device_domain(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 				   struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	struct acpi_iort_its_group *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	struct acpi_iort_node *msi_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	struct acpi_iort_id_mapping *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	struct fwnode_handle *iort_fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	struct irq_domain *domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	index = iort_get_id_mapping_index(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	if (index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			   node->mapping_offset + index * sizeof(*map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	/* Firmware bug! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	if (!map->output_reference ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		       node, node->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 				  map->output_reference);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	/* Move to ITS specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	its = (struct acpi_iort_its_group *)msi_parent->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	if (!iort_fwnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		dev_set_msi_domain(dev, domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741)  * iort_get_platform_device_domain() - Find MSI domain related to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742)  * platform device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743)  * @dev: the dev pointer associated with the platform device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745)  * Returns: the MSI domain for this device, NULL otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	struct acpi_iort_node *node, *msi_parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	struct fwnode_handle *iort_fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	struct acpi_iort_its_group *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	/* find its associated iort node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			      iort_match_node_callback, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	/* then find its msi parent node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	for (i = 0; i < node->mapping_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		msi_parent = iort_node_map_platform_id(node, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 						       IORT_MSI_TYPE, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		if (msi_parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	if (!msi_parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	/* Move to ITS specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	its = (struct acpi_iort_its_group *)msi_parent->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	if (!iort_fwnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) void acpi_configure_pmsi_domain(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	struct irq_domain *msi_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	msi_domain = iort_get_platform_device_domain(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	if (msi_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		dev_set_msi_domain(dev, msi_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) #ifdef CONFIG_IOMMU_API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	struct acpi_iort_node *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	iommu = iort_get_iort_node(fwspec->iommu_fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		struct acpi_iort_smmu_v3 *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			return iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) static inline int iort_add_device_replay(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	if (dev->bus && !device_iommu_mapped(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		err = iommu_probe_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  * iort_iommu_msi_get_resv_regions - Reserved region driver helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  * @dev: Device from iommu_get_resv_regions()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829)  * @head: Reserved region list from iommu_get_resv_regions()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831)  * Returns: Number of msi reserved regions on success (0 if platform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832)  *          doesn't require the reservation or no associated msi regions),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  *          appropriate error value otherwise. The ITS interrupt translation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  *          spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  *          are the msi reserved regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	struct acpi_iort_its_group *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	struct acpi_iort_node *iommu_node, *its_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	int i, resv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	iommu_node = iort_get_msi_resv_iommu(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (!iommu_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	 * Current logic to reserve ITS regions relies on HW topologies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	 * where a given PCI or named component maps its IDs to only one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	 * ITS group; if a PCI or named component can map its IDs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	 * different ITS groups through IORT mappings this function has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	 * to be reworked to ensure we reserve regions for all ITS groups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	 * a given PCI or named component may map IDs to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	for (i = 0; i < fwspec->num_ids; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		its_node = iort_node_map_id(iommu_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 					fwspec->ids[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 					NULL, IORT_MSI_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		if (its_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	if (!its_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	/* Move to ITS specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	its = (struct acpi_iort_its_group *)its_node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	for (i = 0; i < its->its_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		phys_addr_t base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		if (!iort_find_its_base(its->identifiers[i], &base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			struct iommu_resv_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 							 prot, IOMMU_RESV_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			if (region) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 				list_add_tail(&region->list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 				resv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	return (resv == its->its_count) ? resv : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) static inline bool iort_iommu_driver_enabled(u8 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	case ACPI_IORT_NODE_SMMU_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	case ACPI_IORT_NODE_SMMU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		return IS_ENABLED(CONFIG_ARM_SMMU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		pr_warn("IORT node type %u does not describe an SMMU\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			       struct fwnode_handle *fwnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			       const struct iommu_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	int ret = iommu_fwspec_init(dev, fwnode, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		ret = iommu_fwspec_add_ids(dev, &streamid, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	struct acpi_iort_root_complex *pci_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			    u32 streamid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	const struct iommu_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	struct fwnode_handle *iort_fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	iort_fwnode = iort_get_fwnode(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	if (!iort_fwnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	 * If the ops look-up fails, this means that either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	 * the SMMU drivers have not been probed yet or that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	 * the SMMU drivers are not built in the kernel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	 * Depending on whether the SMMU drivers are built-in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	 * in the kernel or not, defer the IOMMU configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	 * or just abort it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	ops = iommu_ops_from_fwnode(iort_fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		return iort_iommu_driver_enabled(node->type) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		       -EPROBE_DEFER : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) struct iort_pci_alias_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct acpi_iort_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	struct iort_pci_alias_info *info = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	struct acpi_iort_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	u32 streamid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	parent = iort_node_map_id(info->node, alias, &streamid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 				  IORT_IOMMU_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	return iort_iommu_xlate(info->dev, parent, streamid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) static void iort_named_component_init(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 				      struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	struct acpi_iort_named_component *nc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	if (!fwspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	nc = (struct acpi_iort_named_component *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 					   nc->node_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	struct acpi_iort_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	int err = -ENODEV, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	u32 streamid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		parent = iort_node_map_platform_id(node, &streamid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 						   IORT_IOMMU_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 						   i++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			err = iort_iommu_xlate(dev, parent, streamid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	} while (parent && !err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static int iort_nc_iommu_map_id(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 				struct acpi_iort_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 				const u32 *in_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	struct acpi_iort_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	u32 streamid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		return iort_iommu_xlate(dev, parent, streamid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  * @dev: device to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  * @id_in: optional input id const value pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  * Returns: iommu_ops pointer on configuration success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  *          NULL on configuration failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 						const u32 *id_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	struct acpi_iort_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	const struct iommu_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	int err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	 * If we already translated the fwspec there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	 * is nothing left to do, return the iommu_ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	ops = iort_fwspec_iommu_ops(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	if (ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		return ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (dev_is_pci(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		struct iommu_fwspec *fwspec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		struct pci_bus *bus = to_pci_dev(dev)->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		struct iort_pci_alias_info info = { .dev = dev };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 				      iort_match_node_callback, &bus->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		info.node = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		err = pci_for_each_dma_alias(to_pci_dev(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 					     iort_pci_iommu_init, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		if (fwspec && iort_pci_rc_supports_ats(node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 				      iort_match_node_callback, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			      iort_nc_iommu_map(dev, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			iort_named_component_init(dev, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	 * If we have reason to believe the IOMMU driver missed the initial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	 * add_device callback for dev, replay it to get things in order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		ops = iort_fwspec_iommu_ops(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		err = iort_add_device_replay(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	/* Ignore all other errors apart from EPROBE_DEFER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	if (err == -EPROBE_DEFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		ops = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	} else if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	return ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 						const u32 *input_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) { return NULL; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static int nc_dma_get_range(struct device *dev, u64 *size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	struct acpi_iort_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	struct acpi_iort_named_component *ncomp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			      iort_match_node_callback, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	ncomp = (struct acpi_iort_named_component *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	if (!ncomp->memory_address_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		pr_warn(FW_BUG "Named component missing memory address limit\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			1ULL<<ncomp->memory_address_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static int rc_dma_get_range(struct device *dev, u64 *size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	struct acpi_iort_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	struct acpi_iort_root_complex *rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	struct pci_bus *pbus = to_pci_dev(dev)->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			      iort_match_node_callback, &pbus->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	if (!node || node->revision < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	rc = (struct acpi_iort_root_complex *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	if (!rc->memory_address_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		pr_warn(FW_BUG "Root complex missing memory address limit\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	*size = rc->memory_address_limit >= 64 ? U64_MAX :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			1ULL<<rc->memory_address_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)  * iort_dma_setup() - Set-up device DMA parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)  * @dev: device to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)  * @dma_addr: device DMA address result pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)  * @dma_size: DMA range size result pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	u64 end, mask, dmaaddr = 0, size = 0, offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	 * If @dev is expected to be DMA-capable then the bus code that created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	 * it should have initialised its dma_mask pointer by this point. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	 * now, we'll continue the legacy behaviour of coercing it to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	 * coherent mask if not, but we'll no longer do so quietly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	if (!dev->dma_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		dev_warn(dev, "DMA mask not set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		dev->dma_mask = &dev->coherent_dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	if (dev->coherent_dma_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		size = 1ULL << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	if (ret == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		ret = dev_is_pci(dev) ? rc_dma_get_range(dev, &size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 				      : nc_dma_get_range(dev, &size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		 * Limit coherent and dma mask based on size retrieved from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		 * firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		end = dmaaddr + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		mask = DMA_BIT_MASK(ilog2(end) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		dev->bus_dma_limit = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		*dev->dma_mask = min(*dev->dma_mask, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	*dma_addr = dmaaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	*dma_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static void __init acpi_iort_register_irq(int hwirq, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 					  int trigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 					  struct resource *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	int irq = acpi_register_gsi(NULL, hwirq, trigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 				    ACPI_ACTIVE_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	if (irq <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 								      name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	res->start = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	res->end = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	res->flags = IORESOURCE_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	res->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	struct acpi_iort_smmu_v3 *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	/* Always present mem resource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	int num_res = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	/* Retrieve SMMUv3 specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	if (smmu->event_gsiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		num_res++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	if (smmu->pri_gsiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		num_res++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	if (smmu->gerr_gsiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		num_res++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	if (smmu->sync_gsiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		num_res++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	return num_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	 * Cavium ThunderX2 implementation doesn't not support unique
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	 * irq line. Use single irq line for all the SMMUv3 interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	 * SPI numbers here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	return smmu->event_gsiv == smmu->pri_gsiv &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	       smmu->event_gsiv == smmu->gerr_gsiv &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	       smmu->event_gsiv == smmu->sync_gsiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	 * Override the size, for Cavium ThunderX2 implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	 * which doesn't support the page 1 SMMU register space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		return SZ_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	return SZ_128K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static void __init arm_smmu_v3_init_resources(struct resource *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 					      struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	struct acpi_iort_smmu_v3 *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	int num_res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	/* Retrieve SMMUv3 specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	res[num_res].start = smmu->base_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	res[num_res].end = smmu->base_address +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 				arm_smmu_v3_resource_size(smmu) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	res[num_res].flags = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	num_res++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	if (arm_smmu_v3_is_combined_irq(smmu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		if (smmu->event_gsiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			acpi_iort_register_irq(smmu->event_gsiv, "combined",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 					       ACPI_EDGE_SENSITIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 					       &res[num_res++]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		if (smmu->event_gsiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 					       ACPI_EDGE_SENSITIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 					       &res[num_res++]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		if (smmu->pri_gsiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 					       ACPI_EDGE_SENSITIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 					       &res[num_res++]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		if (smmu->gerr_gsiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 					       ACPI_EDGE_SENSITIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 					       &res[num_res++]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		if (smmu->sync_gsiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 					       ACPI_EDGE_SENSITIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 					       &res[num_res++]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static void __init arm_smmu_v3_dma_configure(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 					     struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	struct acpi_iort_smmu_v3 *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	enum dev_dma_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	/* Retrieve SMMUv3 specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	dev->dma_mask = &dev->coherent_dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	/* Configure DMA for the page table walker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	acpi_dma_configure(dev, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) #if defined(CONFIG_ACPI_NUMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * set numa proximity domain for smmuv3 device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static int  __init arm_smmu_v3_set_proximity(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 					      struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	struct acpi_iort_smmu_v3 *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		int dev_node = pxm_to_node(smmu->pxm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		set_dev_node(dev, dev_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 			smmu->base_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 			smmu->pxm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) #define arm_smmu_v3_set_proximity NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	struct acpi_iort_smmu *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	/* Retrieve SMMU specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	smmu = (struct acpi_iort_smmu *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	 * Only consider the global fault interrupt and ignore the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	 * configuration access interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	 * MMIO address and global fault interrupt resources are always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	 * present so add them to the context interrupt count as a static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	 * value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	return smmu->context_interrupt_count + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) static void __init arm_smmu_init_resources(struct resource *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 					   struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	struct acpi_iort_smmu *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	int i, hw_irq, trigger, num_res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	u64 *ctx_irq, *glb_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	/* Retrieve SMMU specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	smmu = (struct acpi_iort_smmu *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	res[num_res].start = smmu->base_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	res[num_res].end = smmu->base_address + smmu->span - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	res[num_res].flags = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	num_res++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	/* Global IRQs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 				     &res[num_res++]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	/* Context IRQs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	for (i = 0; i < smmu->context_interrupt_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 				       &res[num_res++]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static void __init arm_smmu_dma_configure(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 					  struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	struct acpi_iort_smmu *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	enum dev_dma_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	/* Retrieve SMMU specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	smmu = (struct acpi_iort_smmu *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	/* We expect the dma masks to be equivalent for SMMU set-ups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	dev->dma_mask = &dev->coherent_dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	/* Configure DMA for the page table walker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	acpi_dma_configure(dev, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	struct acpi_iort_pmcg *pmcg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	/* Retrieve PMCG specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	pmcg = (struct acpi_iort_pmcg *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	 * There are always 2 memory resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	 * If the overflow_gsiv is present then add that for a total of 3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	return pmcg->overflow_gsiv ? 3 : 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 						   struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	struct acpi_iort_pmcg *pmcg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	/* Retrieve PMCG specific data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	pmcg = (struct acpi_iort_pmcg *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	res[0].start = pmcg->page0_base_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	res[0].flags = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	 * The initial version in DEN0049C lacked a way to describe register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	 * page 1, which makes it broken for most PMCG implementations; in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	 * that case, just let the driver fail gracefully if it expects to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	 * find a second memory resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	if (node->revision > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		res[1].start = pmcg->page1_base_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		res[1].end = pmcg->page1_base_address + SZ_4K - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		res[1].flags = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	if (pmcg->overflow_gsiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 				       ACPI_EDGE_SENSITIVE, &res[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) static struct acpi_platform_list pmcg_plat_info[] __initdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	/* HiSilicon Hip08 Platform */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	u32 model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	idx = acpi_match_platform_list(pmcg_plat_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	if (idx >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		model = pmcg_plat_info[idx].data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		model = IORT_SMMU_V3_PMCG_GENERIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	return platform_device_add_data(pdev, &model, sizeof(model));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct iort_dev_config {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	int (*dev_init)(struct acpi_iort_node *node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	void (*dev_dma_configure)(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 				  struct acpi_iort_node *node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	int (*dev_count_resources)(struct acpi_iort_node *node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	void (*dev_init_resources)(struct resource *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 				     struct acpi_iort_node *node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	int (*dev_set_proximity)(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 				    struct acpi_iort_node *node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	int (*dev_add_platdata)(struct platform_device *pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	.name = "arm-smmu-v3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	.dev_dma_configure = arm_smmu_v3_dma_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	.dev_count_resources = arm_smmu_v3_count_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	.dev_init_resources = arm_smmu_v3_init_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	.dev_set_proximity = arm_smmu_v3_set_proximity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	.name = "arm-smmu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	.dev_dma_configure = arm_smmu_dma_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	.dev_count_resources = arm_smmu_count_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	.dev_init_resources = arm_smmu_init_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	.name = "arm-smmu-v3-pmcg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static __init const struct iort_dev_config *iort_get_dev_cfg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 			struct acpi_iort_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	switch (node->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	case ACPI_IORT_NODE_SMMU_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		return &iort_arm_smmu_v3_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	case ACPI_IORT_NODE_SMMU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		return &iort_arm_smmu_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	case ACPI_IORT_NODE_PMCG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		return &iort_arm_smmu_v3_pmcg_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)  * iort_add_platform_device() - Allocate a platform device for IORT node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  * @node: Pointer to device ACPI IORT node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  * @ops: Pointer to IORT device config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)  * Returns: 0 on success, <0 failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) static int __init iort_add_platform_device(struct acpi_iort_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 					   const struct iort_dev_config *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	struct fwnode_handle *fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	struct platform_device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	struct resource *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	int ret, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	if (!pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	if (ops->dev_set_proximity) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		ret = ops->dev_set_proximity(&pdev->dev, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 			goto dev_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	count = ops->dev_count_resources(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	if (!r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		goto dev_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	ops->dev_init_resources(r, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	ret = platform_device_add_resources(pdev, r, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	 * Resources are duplicated in platform_device_add_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	 * free their allocated memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	kfree(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		goto dev_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	 * Platform devices based on PMCG nodes uses platform_data to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	 * pass the hardware model info to the driver. For others, add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	 * a copy of IORT node pointer to platform_data to be used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	 * retrieve IORT data information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	if (ops->dev_add_platdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		ret = ops->dev_add_platdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		ret = platform_device_add_data(pdev, &node, sizeof(node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		goto dev_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	fwnode = iort_get_fwnode(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	if (!fwnode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		goto dev_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	pdev->dev.fwnode = fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	if (ops->dev_dma_configure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		ops->dev_dma_configure(&pdev->dev, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	iort_set_device_domain(&pdev->dev, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	ret = platform_device_add(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		goto dma_deconfigure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) dma_deconfigure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	arch_teardown_dma_ops(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) dev_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	platform_device_put(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	static bool acs_enabled __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	if (acs_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		struct acpi_iort_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		struct acpi_iort_id_mapping *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 				   iort_node->mapping_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		for (i = 0; i < iort_node->mapping_count; i++, map++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 			if (!map->output_reference)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 			parent = ACPI_ADD_PTR(struct acpi_iort_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 					iort_table,  map->output_reference);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 			 * If we detect a RC->SMMU mapping, make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 			 * we enable ACS on the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 				pci_request_acs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 				acs_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static void __init iort_init_platform_devices(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	struct acpi_iort_node *iort_node, *iort_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	struct acpi_table_iort *iort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	struct fwnode_handle *fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	const struct iort_dev_config *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	 * iort_table and iort both point to the start of IORT table, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	 * have different struct types
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	iort = (struct acpi_table_iort *)iort_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	/* Get the first IORT node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 				 iort->node_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 				iort_table->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	for (i = 0; i < iort->node_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		if (iort_node >= iort_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			pr_err("iort node pointer overflows, bad table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		iort_enable_acs(iort_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		ops = iort_get_dev_cfg(iort_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 			fwnode = acpi_alloc_fwnode_static();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 			if (!fwnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			iort_set_fwnode(iort_node, fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 			ret = iort_add_platform_device(iort_node, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 				iort_delete_fwnode(iort_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 				acpi_free_fwnode_static(fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 					 iort_node->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) void __init acpi_iort_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	/* iort_table will be used at runtime after the iort init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	 * so we don't need to call acpi_put_table() to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	 * the IORT table mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		if (status != AE_NOT_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			const char *msg = acpi_format_exception(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 			pr_err("Failed to get table, %s\n", msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	iort_init_platform_devices();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) #ifdef CONFIG_ZONE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)  * Extract the highest CPU physical address accessible to all DMA masters in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)  * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	phys_addr_t limit = PHYS_ADDR_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	struct acpi_iort_node *node, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	struct acpi_table_iort *iort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	if (acpi_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		return limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	status = acpi_get_table(ACPI_SIG_IORT, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 				(struct acpi_table_header **)&iort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		return limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	for (i = 0; i < iort->node_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		if (node >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		switch (node->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			struct acpi_iort_named_component *ncomp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 			struct acpi_iort_root_complex *rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 			phys_addr_t local_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		case ACPI_IORT_NODE_NAMED_COMPONENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 			ncomp = (struct acpi_iort_named_component *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 			limit = min_not_zero(limit, local_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 			if (node->revision < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 			rc = (struct acpi_iort_root_complex *)node->node_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			limit = min_not_zero(limit, local_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	acpi_put_table(&iort->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	return limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) #endif