Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

drivers/net/ipa/ipa_mem.c (all lines last modified in commit 8f3ce5b39, kx, 2023-10-28)
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>

#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "gsi_trans.h"

/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL		cpu_to_le32(0xdeadbeef)

/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM	1
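
/* Illustration (not part of the driver): canaries are 32-bit values
 * written immediately below a region's base offset.  For a hypothetical
 * region at offset 0x2000 with canary_count 2, the loop in
 * ipa_mem_config() below stores IPA_MEM_CANARY_VAL at offsets 0x1ffc
 * and 0x1ff8:
 *
 *	canary = ipa->mem_virt + ipa->mem_offset + 0x2000;
 *	*--canary = IPA_MEM_CANARY_VAL;		(writes offset 0x1ffc)
 *	*--canary = IPA_MEM_CANARY_VAL;		(writes offset 0x1ff8)
 */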

/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	dma_addr_t addr = ipa->zero_addr;

	if (!mem->size)
		return;

	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
}

/**
 * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
 * @ipa:	IPA pointer
 *
 * Set up the shared memory regions in IPA local memory.  This involves
 * zero-filling memory regions, and in the case of header memory, telling
 * the IPA where it's located.
 *
 * This function performs the initial setup of this memory.  If the modem
 * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
 *
 * The AP informs the modem where its portions of memory are located
 * in a QMI exchange that occurs at modem startup.
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_setup(struct ipa *ipa)
{
	dma_addr_t addr = ipa->zero_addr;
	struct gsi_trans *trans;
	u32 offset;
	u16 size;

	/* Get a transaction to define the header memory region and to zero
	 * the processing context and modem memory regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 4);
	if (!trans) {
		dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
		return -EBUSY;
	}

	/* Initialize IPA-local header memory.  The modem and AP header
	 * regions are contiguous, and initialized together.
	 */
	offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
	size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
	size += ipa->mem[IPA_MEM_AP_HEADER].size;

	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_AP_PROC_CTX]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);

	gsi_trans_commit_wait(trans);

	/* Tell the hardware where the processing context area is located */
	iowrite32(ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset,
		  ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET);

	return 0;
}

void ipa_mem_teardown(struct ipa *ipa)
{
	/* Nothing to do */
}

#ifdef IPA_VALIDATE

static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	const struct ipa_mem *mem = &ipa->mem[mem_id];
	struct device *dev = &ipa->pdev->dev;
	u16 size_multiple;

	/* Other than modem memory, sizes must be a multiple of 8 */
	size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
	if (mem->size % size_multiple)
		dev_err(dev, "region %u size not a multiple of %u bytes\n",
			mem_id, size_multiple);
	else if (mem->offset % 8)
		dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
	else if (mem->offset < mem->canary_count * sizeof(__le32))
		dev_err(dev, "region %u offset too small for %hu canaries\n",
			mem_id, mem->canary_count);
	else if (mem->offset + mem->size > ipa->mem_size)
		dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
			mem_id, ipa->mem_size);
	else
		return true;

	return false;
}

#else /* !IPA_VALIDATE */

static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	return true;
}

#endif /* !IPA_VALIDATE */
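
/* Worked example (illustrative values): a region at offset 0x8 with
 * size 0x20 and canary_count 3 passes the size and alignment checks in
 * ipa_mem_valid(), but fails the canary check: three canaries need
 * 3 * sizeof(__le32) = 12 bytes below the base, and an offset of 8
 * leaves room for only two.
 */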

/**
 * ipa_mem_config() - Configure IPA shared memory
 * @ipa:	IPA pointer
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	enum ipa_mem_id mem_id;
	dma_addr_t addr;
	u32 mem_size;
	void *virt;
	u32 val;

	/* Check the advertised location and size of the shared memory area */
	val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);

	/* The fields in the register are in 8 byte units */
	ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
	/* Make sure the end is within the region's mapped space */
	mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);

	/* If the sizes don't match, issue a warning */
	if (ipa->mem_offset + mem_size > ipa->mem_size) {
		dev_warn(dev, "ignoring larger reported memory size: 0x%08x\n",
			mem_size);
	} else if (ipa->mem_offset + mem_size < ipa->mem_size) {
		dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
			 mem_size);
		ipa->mem_size = mem_size;
	}

	/* Prealloc DMA memory for zeroing regions */
	virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	ipa->zero_addr = addr;
	ipa->zero_virt = virt;
	ipa->zero_size = IPA_MEM_MAX;

	/* Verify each defined memory region is valid, and if indicated
	 * for the region, write "canary" values in the space prior to
	 * the region's base address.
	 */
	for (mem_id = 0; mem_id < ipa->mem_count; mem_id++) {
		const struct ipa_mem *mem = &ipa->mem[mem_id];
		u16 canary_count;
		__le32 *canary;

		/* Validate all regions (even undefined ones) */
		if (!ipa_mem_valid(ipa, mem_id))
			goto err_dma_free;

		/* Skip over undefined regions */
		if (!mem->offset && !mem->size)
			continue;

		canary_count = mem->canary_count;
		if (!canary_count)
			continue;

		/* Write canary values in the space before the region */
		canary = ipa->mem_virt + ipa->mem_offset + mem->offset;
		do
			*--canary = IPA_MEM_CANARY_VAL;
		while (--canary_count);
	}

	/* Make sure filter and route table memory regions are valid */
	if (!ipa_table_valid(ipa))
		goto err_dma_free;

	/* Validate memory-related properties relevant to immediate commands */
	if (!ipa_cmd_data_valid(ipa))
		goto err_dma_free;

	/* Verify the microcontroller ring alignment (0 is OK too) */
	if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
		dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
		goto err_dma_free;
	}

	return 0;

err_dma_free:
	dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);

	return -EINVAL;
}
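
/* Worked example (illustrative register value, assuming the field
 * layout in ipa_reg.h where SHARED_MEM_SIZE_FMASK covers the low half
 * of the register and SHARED_MEM_BADDR_FMASK the high half): a
 * SHARED_MEM_SIZE reading of 0x00000280 decodes as BADDR 0 and SIZE
 * 0x280.  Both fields are in 8-byte units, so ipa_mem_config() derives
 * mem_offset = 0 and mem_size = 8 * 0x280 = 0x1400 bytes.
 */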

/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;

	dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
	ipa->zero_size = 0;
	ipa->zero_virt = NULL;
	ipa->zero_addr = 0;
}

/**
 * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
 * @ipa:	IPA pointer
 *
 * Zero regions of IPA-local memory used by the modem.  These are configured
 * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
 * restarts via SSR we need to re-initialize them.  A QMI message tells the
 * modem where to find the regions of IPA local memory it needs to know
 * about (including these).
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_zero_modem(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* Get a transaction to zero the modem memory, modem header,
	 * and modem processing context regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 3);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to zero modem memory\n");
		return -EBUSY;
	}

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_HEADER]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);

	gsi_trans_commit_wait(trans);

	return 0;
}

/**
 * ipa_imem_init() - Initialize IMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @addr:	Physical address of the IPA region in IMEM
 * @size:	Size (bytes) of the IPA region in IMEM
 *
 * IMEM is a block of shared memory separate from system DRAM, and
 * a portion of this memory is available for the IPA to use.  The
 * modem accesses this memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If this region exists (size > 0) we map it for read/write access
 * through the IOMMU using the IPA device.
 *
 * Note: @addr and @size are not guaranteed to be page-aligned.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	int ret;

	if (!size)
		return 0;	/* IMEM memory not used */

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for IMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to page boundaries */
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	ipa->imem_iova = iova;
	ipa->imem_size = size;

	return 0;
}
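
/* Worked example (illustrative addresses, 4 KiB pages): for an IMEM
 * region with addr 0x146bf800 and size 0x2000, ipa_imem_init() computes
 * phys = 0x146bf000 and size = PAGE_ALIGN(0x2000 + 0x800) = 0x3000,
 * then maps iova 0x146bf000 one-to-one onto that physical range, so
 * the unaligned head and tail of the region stay inside the mapping.
 */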

static void ipa_imem_exit(struct ipa *ipa)
{
	struct iommu_domain *domain;
	struct device *dev;

	if (!ipa->imem_size)
		return;

	dev = &ipa->pdev->dev;
	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
		if (size != ipa->imem_size)
			dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
				 size, ipa->imem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
	}

	ipa->imem_size = 0;
	ipa->imem_iova = 0;
}

/**
 * ipa_smem_init() - Initialize SMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @item:	Item ID of SMEM memory
 * @size:	Size (bytes) of SMEM memory region
 *
 * SMEM is a managed block of shared DRAM, from which numbered "items"
 * can be allocated.  One item is designated for use by the IPA.
 *
 * The modem accesses SMEM memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If the size provided is non-zero, we allocate the item and map it
 * for access through the IOMMU.
 *
 * Note: @size and the item address are not guaranteed to be page-aligned.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	phys_addr_t addr;
	size_t actual;
	void *virt;
	int ret;

	if (!size)
		return 0;	/* SMEM memory not used */

	/* SMEM is memory shared between the AP and another system entity
	 * (in this case, the modem).  An allocation from SMEM is persistent
	 * until the AP reboots; there is no way to free an allocated SMEM
	 * region.  Allocation only reserves the space; to use it you need
	 * to "get" a pointer to it (this implies no reference counting).
	 * The item might have already been allocated, in which case we
	 * use it unless the size isn't what we expect.
	 */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
			ret, size, item);
		return ret;
	}

	/* Now get the address of the SMEM memory region */
	virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
	if (IS_ERR(virt)) {
		ret = PTR_ERR(virt);
		dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
		return ret;
	}

	/* If the item was already allocated (ret is -EEXIST), verify its size */
	if (ret && actual != size) {
		dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
			item, actual, size);
		return -EINVAL;
	}

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for SMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to a page boundary */
	addr = qcom_smem_virt_to_phys(virt);
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	ipa->smem_iova = iova;
	ipa->smem_size = size;

	return 0;
}
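
/* Flow note (sketch of the two interesting paths): on a fresh boot
 * qcom_smem_alloc() returns 0 and the item is newly reserved.  If the
 * item already exists (SMEM allocations persist until the AP reboots),
 * qcom_smem_alloc() returns -EEXIST, ipa_smem_init() proceeds to
 * qcom_smem_get(), and the "ret && actual != size" test rejects a
 * stale item whose size doesn't match what we expect.
 */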

static void ipa_smem_exit(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
		if (size != ipa->smem_size)
			dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
				 size, ipa->smem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
	}

	ipa->smem_size = 0;
	ipa->smem_iova = 0;
}

/* Perform memory region-related initialization */
int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
	struct device *dev = &ipa->pdev->dev;
	struct resource *res;
	int ret;

	if (mem_data->local_count > IPA_MEM_COUNT) {
		dev_err(dev, "too many memory regions (%u > %u)\n",
			mem_data->local_count, IPA_MEM_COUNT);
		return -EINVAL;
	}

	ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "error %d setting DMA mask\n", ret);
		return ret;
	}

	res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
					   "ipa-shared");
	if (!res) {
		dev_err(dev,
			"DT error getting \"ipa-shared\" memory property\n");
		return -ENODEV;
	}

	ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
	if (!ipa->mem_virt) {
		dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
		return -ENOMEM;
	}

	ipa->mem_addr = res->start;
	ipa->mem_size = resource_size(res);

	/* The ipa->mem[] array is indexed by enum ipa_mem_id values */
	ipa->mem_count = mem_data->local_count;
	ipa->mem = mem_data->local;

	ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
	if (ret)
		goto err_unmap;

	ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
	if (ret)
		goto err_imem_exit;

	return 0;

err_imem_exit:
	ipa_imem_exit(ipa);
err_unmap:
	memunmap(ipa->mem_virt);

	return ret;
}
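
/* Illustrative devicetree fragment (addresses and the "ipa-reg" name
 * are assumptions, not taken from this tree; only "ipa-shared" is used
 * above): the region ipa_mem_init() remaps comes from a named reg
 * entry, e.g.:
 *
 *	ipa@1e40000 {
 *		reg = <0 0x1e40000 0 0x7000>, <0 0x1e47000 0 0x2000>;
 *		reg-names = "ipa-reg", "ipa-shared";
 *	};
 */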

/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
	ipa_smem_exit(ipa);
	ipa_imem_exit(ipa);
	memunmap(ipa->mem_virt);
}
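
/* Call-order summary: ipa_mem_init() remaps the "ipa-shared" area and
 * maps IMEM and SMEM at probe time; ipa_mem_config() validates regions
 * and writes canaries (ipa_mem_deconfig() is its inverse);
 * ipa_mem_setup() initializes the regions once GSI transactions are
 * available, with ipa_mem_zero_modem() re-zeroing the modem regions
 * after SSR; ipa_mem_exit() undoes ipa_mem_init().
 */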