Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_clock.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
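
/* Worked example (illustrative only): if the modem supplies the 32-bit
 * metadata value 0x0000002a with a packet, masking it with
 * IPA_ENDPOINT_QMAP_METADATA_MASK keeps just the low-order byte, 0x2a,
 * which is the QMAP mux_id (42) for that packet.
 */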

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT_DEFAULT		500	/* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};
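
/* For reference, the fields above pack without padding into
 * 1 + 1 + 2 + 2 + 1 + 1 + 4 + 4 + 8 + 4 + 4 = 32 bytes, a multiple of 4,
 * as the BUILD_BUG_ON() in ipa_endpoint_validate_build() below expects.
 */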

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check just ensures we don't define a receive buffer
	 * size that would exceed what we can represent in the field
	 * that is used to program its size.
	 */
	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);

	/* I honestly don't know where this requirement comes from.  But
	 * it holds, and if we someday need to loosen the constraint we
	 * can try to track it down.
	 */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	ipa_endpoint_validate_build();

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
	 * correctly on IPA v4.2.
	 *
	 * if (endpoint->toward_ipa)
	 * 	assert(ipa->version != IPA_VERSION_4_2);
	 * else
	 * 	assert(ipa->version == IPA_VERSION_3_5_1);
	 */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	/* Don't bother if it's already in the requested state */
	state = !!(val & mask);
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(endpoint->toward_ipa); */

	/* Delay mode doesn't work properly for IPA v4.2 */
	if (endpoint->ipa->version != IPA_VERSION_4_2)
		(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 *  with an open aggregation frame.  This is to work around a hardware
 *  issue in IPA version 3.5.1 where the suspend interrupt will not be
 *  generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version != IPA_VERSION_3_5_1)
		return enable;	/* For IPA v4.0+, no change made */

	/* assert(!endpoint->toward_ipa); */

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	/* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode or RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
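
/* Worked example for the uplink checksum offset above (assuming the
 * usual 4-byte struct rmnet_map_header): the checksum metadata header
 * follows the 4-byte QMAP header, so CS_METADATA_HDR_OFFSET is
 * programmed in 4-byte units as 4 / 4 = 1.
 */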

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);

		/* We might supply a checksum header after the QMAP header */
		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 off;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			off = offsetof(struct rmnet_map_header, mux_id);
			val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);

			/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only) */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
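
/* Illustration of the QMAP RX case above, assuming the usual layout of
 * struct rmnet_map_header (one byte of flag bits, then a 1-byte mux_id,
 * then a 2-byte pkt_len): HDR_LEN is programmed with 4, HDR_OFST_METADATA
 * with offsetof(struct rmnet_map_header, mux_id) == 1, and
 * HDR_OFST_PKT_SIZE with offsetof(struct rmnet_map_header, pkt_len) == 2.
 */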

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */

	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
	 * driver assumes this field is meaningful in packets it receives,
	 * and assumes the header's payload length includes that padding.
	 * The RMNet driver does *not* pad packets it sends, however, so
	 * the pad field (although 0) should be ignored.
	 */
	if (endpoint->data->qmap && !endpoint->toward_ipa) {
		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->data->qmap)
		val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}
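
/* Rough worked example (the exact overhead depends on the kernel
 * configuration): assuming the usual 1500-byte IPA_MTU and a few hundred
 * bytes of IPA_RX_BUFFER_OVERHEAD (NET_SKB_PAD plus the aligned
 * skb_shared_info), 8192 - (1500 + ~384) is a little over 6 KB, so an
 * 8192-byte receive buffer yields an aggregation byte limit of 6.
 */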

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);

			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
			limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
			val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			if (endpoint->data->rx.aggr_close_eof)
				val |= AGGR_SW_EOF_ACTIVE_FMASK;
			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
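
/* For the time limit above: IPA_AGGR_TIME_LIMIT_DEFAULT is 500
 * microseconds, so assuming the usual 500 microsecond aggregation
 * granularity defined elsewhere in the driver, AGGR_TIME_LIMIT is
 * programmed with DIV_ROUND_CLOSEST(500, 500) = 1 unit.
 */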

/* The head-of-line blocking timer is defined as a tick count, where each
 * tick represents 128 cycles of the IPA core clock.  Return the value
 * that should be written to that register that represents the timeout
 * period provided.
 */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 just records the tick count */
	if (ipa->version == IPA_VERSION_3_5_1)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that that high bit is included.
	 */
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}
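
/* Worked example of the IPA v4.2 encoding above, assuming a 5-bit
 * BASE_VALUE field: for ticks = 100 (fls() == 7), scale = 7 - 5 = 2.
 * Rounding adds 1 << 1 = 2, giving 102, whose high bit is unchanged,
 * so the register is programmed with base = 102 >> 2 = 25 and
 * scale = 2, which the hardware decodes as 25 << 2 = 100 ticks.
 */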
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) /* If microseconds is 0, timeout is immediate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 					      u32 microseconds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	u32 endpoint_id = endpoint->endpoint_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	struct ipa *ipa = endpoint->ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	/* This should only be changed when HOL_BLOCK_EN is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	iowrite32(val, ipa->reg_virt + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	u32 endpoint_id = endpoint->endpoint_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	val = enable ? HOL_BLOCK_EN_FMASK : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	iowrite32(val, endpoint->ipa->reg_virt + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		struct ipa_endpoint *endpoint = &ipa->endpoint[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		ipa_endpoint_init_hol_block_enable(endpoint, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		ipa_endpoint_init_hol_block_timer(endpoint, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		ipa_endpoint_init_hol_block_enable(endpoint, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	if (!endpoint->toward_ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		return;		/* Register not valid for RX endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	/* DEAGGR_HDR_LEN is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	/* PACKET_OFFSET_VALID is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	/* MAX_PACKET_LEN is 0 (not enforced) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	iowrite32(val, endpoint->ipa->reg_virt + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	u32 seq_type = endpoint->seq_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	if (!endpoint->toward_ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		return;		/* Register not valid for RX endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	/* Sequencer type is made up of four nibbles */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	/* The second two apply to replicated packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	iowrite32(val, endpoint->ipa->reg_virt + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) }
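/* Worked example (illustrative): a hypothetical seq_type value of 0xa104
 * would be split into nibbles by the shifts above as HPS_SEQ_TYPE = 0x4,
 * DPS_SEQ_TYPE = 0x0, HPS_REP_SEQ_TYPE = 0x1 and DPS_REP_SEQ_TYPE = 0xa.
 */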
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776)  * ipa_endpoint_skb_tx() - Transmit a socket buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  * @endpoint:	Endpoint pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  * @skb:	Socket buffer to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  * Return:	0 if successful, or a negative error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	struct gsi_trans *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	u32 nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	/* Make sure source endpoint's TLV FIFO has enough entries to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	 * hold the linear portion of the skb and all its fragments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	 * If not, see if we can linearize it before giving up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	nr_frags = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	if (1 + nr_frags > endpoint->trans_tre_max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		if (skb_linearize(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		nr_frags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	if (!trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	ret = gsi_trans_skb_add(trans, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		goto err_trans_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	trans->data = skb;	/* transaction owns skb now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	gsi_trans_commit(trans, !netdev_xmit_more());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) err_trans_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	gsi_trans_free(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) }
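/* Worked example (illustrative): if the endpoint's trans_tre_max were 8, an
 * skb with a linear area plus 10 fragments would need 11 TREs, so it is
 * linearized first and then consumes a single TRE.  A failed transaction
 * allocation returns -EBUSY to the caller rather than dropping the skb.
 */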
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	u32 endpoint_id = endpoint->endpoint_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	struct ipa *ipa = endpoint->ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	if (endpoint->data->status_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		val |= STATUS_EN_FMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		if (endpoint->toward_ipa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 			enum ipa_endpoint_name name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			u32 status_endpoint_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			name = endpoint->data->tx.status_endpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			status_endpoint_id = ipa->name_map[name]->endpoint_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			val |= u32_encode_bits(status_endpoint_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 					       STATUS_ENDP_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		/* STATUS_LOCATION is 0 (status element precedes packet) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		/* The next field is present for IPA v4.0 and above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	iowrite32(val, ipa->reg_virt + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct gsi_trans *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	bool doorbell = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	trans = ipa_endpoint_trans_alloc(endpoint, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	if (!trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		goto err_free_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	/* Offset the buffer to make space for skb headroom */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	offset = NET_SKB_PAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	len = IPA_RX_BUFFER_SIZE - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	ret = gsi_trans_page_add(trans, page, len, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		goto err_trans_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	trans->data = page;	/* transaction owns page now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		doorbell = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		endpoint->replenish_ready = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	gsi_trans_commit(trans, doorbell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) err_trans_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	gsi_trans_free(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) err_free_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) }
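/* Worked arithmetic (illustrative): IPA_RX_BUFFER_SIZE is 8192, so with
 * 4 KiB pages dev_alloc_pages() above requests an order-1 (two page)
 * allocation.  The replenish_ready counter means the channel doorbell is
 * rung only on every IPA_REPLENISH_BATCH-th (16th) committed buffer; the
 * other buffers are queued without notifying the hardware.
 */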
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891)  * ipa_endpoint_replenish() - Replenish the RX packet cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  * @endpoint:	Endpoint to be replenished
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  * @count:	Number of buffers to send to hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  * Allocate RX packet wrapper structures with maximal socket buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * for an endpoint.  These are supplied to the hardware, which fills
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  * them with incoming data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	struct gsi *gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	u32 backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			atomic_add(count, &endpoint->replenish_saved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	/* If already active, just update the backlog */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			atomic_add(count, &endpoint->replenish_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		if (ipa_endpoint_replenish_one(endpoint))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			goto try_again_later;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		atomic_add(count, &endpoint->replenish_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) try_again_later:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	/* The last one didn't succeed, so fix the backlog */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	backlog = atomic_add_return(count + 1, &endpoint->replenish_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	/* Whenever a receive buffer transaction completes we'll try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	 * replenish again.  It's unlikely, but if we fail to supply even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	 * one buffer, nothing will trigger another replenish attempt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	 * Receive buffer transactions use one TRE, so schedule work to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	 * try replenishing again if our backlog is *all* available TREs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	gsi = &endpoint->ipa->gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		schedule_delayed_work(&endpoint->replenish_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 				      msecs_to_jiffies(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) }
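/* Worked example (illustrative): suppose the backlog is 5 and a completion
 * calls this with count = 1.  The loop decrements the backlog to 4 before
 * attempting a buffer; if that attempt fails, atomic_add_return(count + 1)
 * restores the failed decrement and adds the caller's count, leaving the
 * backlog at 6, the total number of buffers still owed to the hardware.
 * Only when that total equals the channel's TRE count (meaning no buffers
 * are posted at all) is the delayed work scheduled as a fallback trigger.
 */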
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	struct gsi *gsi = &endpoint->ipa->gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	u32 max_backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	u32 saved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		atomic_add(saved, &endpoint->replenish_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	/* Start replenishing if hardware currently has no buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		ipa_endpoint_replenish(endpoint, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	u32 backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		atomic_add(backlog, &endpoint->replenish_saved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) static void ipa_endpoint_replenish_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	struct delayed_work *dwork = to_delayed_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	struct ipa_endpoint *endpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	ipa_endpoint_replenish(endpoint, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 				  void *data, u32 len, u32 extra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	skb = __dev_alloc_skb(len, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		skb_put(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		memcpy(skb->data, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		skb->truesize += extra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	/* Now receive it, or drop it if there's no netdev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (endpoint->netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		ipa_modem_skb_rx(endpoint->netdev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	else if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 				   struct page *page, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	/* Nothing to do if there's no netdev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	if (!endpoint->netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		/* Reserve the headroom and account for the data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		skb_reserve(skb, NET_SKB_PAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		skb_put(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	/* Receive the buffer (or record drop if unable to build it) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	ipa_modem_skb_rx(endpoint->netdev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	return skb != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
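/* Note (illustrative): build_skb() is handed the entire IPA_RX_BUFFER_SIZE
 * buffer, so the resulting skb accounts for roughly the whole two-page
 * allocation.  skb_reserve(NET_SKB_PAD) skips the headroom offset left when
 * the buffer was posted in ipa_endpoint_replenish_one(), and skb_put(len)
 * covers only the bytes the hardware actually wrote.
 */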
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* The format of a packet status element is the same for several status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  * types (opcodes).  Other types aren't currently supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	case IPA_STATUS_OPCODE_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	case IPA_STATUS_OPCODE_DROPPED_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 				     const struct ipa_status *status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	u32 endpoint_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	if (!ipa_status_format_packet(status->opcode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	if (!status->pkt_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	endpoint_id = u32_get_bits(status->endp_dst_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 				   IPA_STATUS_DST_IDX_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	if (endpoint_id != endpoint->endpoint_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	return false;	/* Don't skip this packet, process it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* Return whether the status indicates the packet should be dropped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) static bool ipa_status_drop_packet(const struct ipa_status *status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	/* Deaggregation exceptions we drop; all other types we consume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	if (status->exception)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	/* Drop the packet if it fails to match a routing rule; otherwise no */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
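/* Note (illustrative): an all-ones route rule ID is how the hardware reports
 * that no routing rule matched, and field_max() of the rule ID field mask
 * yields that all-ones value (for example, a 10-bit field would give 0x3ff),
 * so such packets are dropped.  Deaggregation exceptions are likewise
 * dropped, while any other exception type is still passed up.
 */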
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 				      struct page *page, u32 total_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	void *data = page_address(page) + NET_SKB_PAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	u32 resid = total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	while (resid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		const struct ipa_status *status = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		u32 align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		if (resid < sizeof(*status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			dev_err(&endpoint->ipa->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 				"short message (%u bytes < %zu byte status)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 				resid, sizeof(*status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		/* Skip over status packets that lack packet data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		if (ipa_endpoint_status_skip(endpoint, status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			data += sizeof(*status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			resid -= sizeof(*status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		/* Compute the amount of buffer space consumed by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		 * packet, including the status element.  If the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		 * is configured to pad packet data to an aligned boundary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		 * account for that.  And if checksum offload is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		 * a trailer containing computed checksum information will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		 * be appended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		align = endpoint->data->rx.pad_align ? : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		len = le16_to_cpu(status->pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		len = sizeof(*status) + ALIGN(len, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		if (endpoint->data->checksum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			len += sizeof(struct rmnet_map_dl_csum_trailer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		/* Charge the new packet with a proportional fraction of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		 * the unused space in the original receive buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		 * XXX Charge a proportion of the *whole* receive buffer?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		if (!ipa_status_drop_packet(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			u32 extra = unused * len / total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			void *data2 = data + sizeof(*status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			u32 len2 = le16_to_cpu(status->pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			/* Client receives only packet data (no status) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		/* Consume status and the full packet it describes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		data += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		resid -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
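/* Worked arithmetic (illustrative; the element sizes are assumptions): if a
 * status element were 32 bytes, pkt_len were 1001, pad_align were 8 and
 * checksum offload added an 8-byte trailer, the packet would consume
 * 32 + ALIGN(1001, 8) + 8 = 32 + 1008 + 8 = 1048 bytes of the buffer.
 * The truesize "extra" charge is proportional: with total_len = 4096 in an
 * 8192-byte buffer, unused = 4096, so a 1024-byte chunk is charged
 * 4096 * 1024 / 4096 = 1024 extra bytes.
 */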
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* Complete a TX transaction (a command or one from ipa_endpoint_skb_tx()) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 				     struct gsi_trans *trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /* Complete transaction initiated in ipa_endpoint_replenish_one() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 				     struct gsi_trans *trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	ipa_endpoint_replenish(endpoint, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (trans->cancelled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	/* Parse or build a socket buffer using the actual received length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	page = trans->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	if (endpoint->data->status_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		ipa_endpoint_status_parse(endpoint, page, trans->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		trans->data = NULL;	/* Pages have been consumed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 				 struct gsi_trans *trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	if (endpoint->toward_ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		ipa_endpoint_tx_complete(endpoint, trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		ipa_endpoint_rx_complete(endpoint, trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 				struct gsi_trans *trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	if (endpoint->toward_ipa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		struct ipa *ipa = endpoint->ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		/* Nothing to do for command transactions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			struct sk_buff *skb = trans->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 				dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		struct page *page = trans->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	/* ROUTE_DIS is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	val |= ROUTE_DEF_HDR_TABLE_FMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	val |= ROUTE_DEF_RETAIN_HDR_FMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) void ipa_endpoint_default_route_clear(struct ipa *ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	ipa_endpoint_default_route_set(ipa, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)  * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  * @endpoint:	Endpoint to be reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)  * If aggregation is active on an RX endpoint when a reset is performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)  * on its underlying GSI channel, a special sequence of actions must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)  * taken to ensure the IPA pipeline is properly cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  * Return:	0 if successful, or a negative error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	struct device *dev = &endpoint->ipa->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	struct ipa *ipa = endpoint->ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	struct gsi *gsi = &ipa->gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	bool suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	bool legacy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	u32 retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	u32 len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	void *virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	virt = kzalloc(len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	if (!virt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	if (dma_mapping_error(dev, addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		goto out_kfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	/* Force close aggregation before issuing the reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	ipa_endpoint_force_close(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	/* Reset and reconfigure the channel with the doorbell engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	 * disabled.  Then poll until we know aggregation is no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	 * active.  We'll re-enable the doorbell (if appropriate) when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	 * we reset again below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	gsi_channel_reset(gsi, endpoint->channel_id, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	/* Make sure the channel isn't suspended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	suspended = ipa_endpoint_program_suspend(endpoint, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	/* Start channel and do a 1 byte read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	ret = gsi_channel_start(gsi, endpoint->channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		goto out_suspend_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		goto err_endpoint_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	/* Wait for aggregation to be closed on the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		if (!ipa_endpoint_aggr_active(endpoint))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	} while (retries--);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	/* Check one last time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (ipa_endpoint_aggr_active(endpoint))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		dev_err(dev, "endpoint %u still active during reset\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		goto out_suspend_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	/* Finally, reset and reconfigure the channel again (re-enabling the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	 * doorbell engine if appropriate).  Sleep for 1 millisecond to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	 * complete the channel reset sequence.  Finish by suspending the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	 * channel again (if necessary).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	legacy = ipa->version == IPA_VERSION_3_5_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	gsi_channel_reset(gsi, endpoint->channel_id, legacy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	goto out_suspend_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) err_endpoint_stop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	(void)gsi_channel_stop(gsi, endpoint->channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) out_suspend_again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	if (suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		(void)ipa_endpoint_program_suspend(endpoint, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) out_kfree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	kfree(virt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
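/* Summary of the special reset sequence above (descriptive only):
 *  1. Force-close any in-progress aggregation on the endpoint.
 *  2. Reset the channel with the doorbell engine disabled and make sure the
 *     endpoint is not suspended.
 *  3. Start the channel and post a one-byte read to flush the pipeline.
 *  4. Poll (up to IPA_ENDPOINT_RESET_AGGR_RETRY_MAX times, sleeping 1 ms
 *     between checks) until aggregation is no longer active.
 *  5. Stop the channel, reset it again (re-enabling the doorbell engine on
 *     IPA v3.5.1), and re-suspend the endpoint if it was suspended before.
 */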
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	u32 channel_id = endpoint->channel_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	struct ipa *ipa = endpoint->ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	bool special;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	bool legacy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	 * is active, we need to handle things specially to recover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	 * All other cases just need to reset the underlying GSI channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	 * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	legacy = ipa->version == IPA_VERSION_3_5_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	special = !endpoint->toward_ipa && endpoint->data->aggregation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	if (special && ipa_endpoint_aggr_active(endpoint))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		ret = ipa_endpoint_reset_rx_aggr(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		gsi_channel_reset(&ipa->gsi, channel_id, legacy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		dev_err(&ipa->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			"error %d resetting channel %u for endpoint %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 			ret, endpoint->channel_id, endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	if (endpoint->toward_ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		ipa_endpoint_program_delay(endpoint, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		(void)ipa_endpoint_program_suspend(endpoint, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	ipa_endpoint_init_cfg(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	ipa_endpoint_init_hdr(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	ipa_endpoint_init_hdr_ext(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	ipa_endpoint_init_hdr_metadata_mask(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	ipa_endpoint_init_mode(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	ipa_endpoint_init_aggr(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	ipa_endpoint_init_deaggr(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	ipa_endpoint_init_seq(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	ipa_endpoint_status(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	struct ipa *ipa = endpoint->ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	struct gsi *gsi = &ipa->gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	ret = gsi_channel_start(gsi, endpoint->channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		dev_err(&ipa->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 			"error %d starting %cX channel %u for endpoint %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 			ret, endpoint->toward_ipa ? 'T' : 'R',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 			endpoint->channel_id, endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	if (!endpoint->toward_ipa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		ipa_interrupt_suspend_enable(ipa->interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 					     endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		ipa_endpoint_replenish_enable(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	ipa->enabled |= BIT(endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	u32 mask = BIT(endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	struct ipa *ipa = endpoint->ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	struct gsi *gsi = &ipa->gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	if (!(ipa->enabled & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	ipa->enabled ^= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	if (!endpoint->toward_ipa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		ipa_endpoint_replenish_disable(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		ipa_interrupt_suspend_disable(ipa->interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 					      endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	/* Note that if stop fails, the channel's state is not well-defined */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		dev_err(&ipa->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 			"error %d attempting to stop endpoint %u\n", ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 			endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	struct device *dev = &endpoint->ipa->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	struct gsi *gsi = &endpoint->ipa->gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	bool stop_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	if (!endpoint->toward_ipa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		ipa_endpoint_replenish_disable(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		(void)ipa_endpoint_program_suspend(endpoint, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	/* IPA v3.5.1 doesn't use channel stop for suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		dev_err(dev, "error %d suspending channel %u\n", ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 			endpoint->channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	struct device *dev = &endpoint->ipa->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	struct gsi *gsi = &endpoint->ipa->gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	bool start_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	if (!endpoint->toward_ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		(void)ipa_endpoint_program_suspend(endpoint, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	/* IPA v3.5.1 doesn't use channel start for resume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		dev_err(dev, "error %d resuming channel %u\n", ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 			endpoint->channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	else if (!endpoint->toward_ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		ipa_endpoint_replenish_enable(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) void ipa_endpoint_suspend(struct ipa *ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (!ipa->setup_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	if (ipa->modem_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		ipa_modem_suspend(ipa->modem_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	ipa_cmd_tag_process(ipa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) void ipa_endpoint_resume(struct ipa *ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	if (!ipa->setup_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	if (ipa->modem_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		ipa_modem_resume(ipa->modem_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	struct gsi *gsi = &endpoint->ipa->gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	u32 channel_id = endpoint->channel_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	/* Only AP endpoints get set up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	if (endpoint->ee_id != GSI_EE_AP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	if (!endpoint->toward_ipa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		/* RX transactions require a single TRE, so the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		 * backlog is the same as the maximum outstanding TREs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		atomic_set(&endpoint->replenish_saved,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			   gsi_channel_tre_max(gsi, endpoint->channel_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		atomic_set(&endpoint->replenish_backlog, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		INIT_DELAYED_WORK(&endpoint->replenish_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 				  ipa_endpoint_replenish_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	ipa_endpoint_program(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	if (!endpoint->toward_ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		cancel_delayed_work_sync(&endpoint->replenish_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	ipa_endpoint_reset(endpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
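/**
 * ipa_endpoint_setup() - Set up all initialized endpoints
 * @ipa:	IPA pointer
 *
 * Walks the initialized bitmap from the lowest endpoint ID upward and
 * sets up each endpoint in turn.
 */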
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) void ipa_endpoint_setup(struct ipa *ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	u32 initialized = ipa->initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	ipa->set_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	while (initialized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		u32 endpoint_id = __ffs(initialized);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		initialized ^= BIT(endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 
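/**
 * ipa_endpoint_teardown() - Tear down all set-up endpoints
 * @ipa:	IPA pointer
 *
 * Walks the set_up bitmap from the highest endpoint ID downward,
 * tearing down each endpoint, then clears the bitmap.
 */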
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) void ipa_endpoint_teardown(struct ipa *ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	u32 set_up = ipa->set_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	while (set_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		u32 endpoint_id = __fls(set_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		set_up ^= BIT(endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	ipa->set_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
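/**
 * ipa_endpoint_config() - Validate initialized endpoints against hardware
 * @ipa:	IPA pointer
 *
 * Reads the flavor register to learn which endpoint IDs the hardware
 * provides (consumer pipes for TX, producer pipes for RX), records the
 * resulting "available" mask, and verifies that every initialized
 * endpoint is available and has the expected direction.
 *
 * Return:	0 if successful, or a negative error code otherwise
 */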
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) int ipa_endpoint_config(struct ipa *ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	struct device *dev = &ipa->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	u32 initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	u32 rx_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	u32 rx_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	u32 tx_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	u32 max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	/* Find out about the endpoints supplied by the hardware, and ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	 * the highest one doesn't exceed the number we support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	/* Our RX is an IPA producer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	if (max > IPA_ENDPOINT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		dev_err(dev, "too many endpoints (%u > %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			max, IPA_ENDPOINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	rx_mask = GENMASK(max - 1, rx_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	/* Our TX is an IPA consumer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	tx_mask = GENMASK(max - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	ipa->available = rx_mask | tx_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	/* Check for initialized endpoints not supported by the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (ipa->initialized & ~ipa->available) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 			ipa->initialized & ~ipa->available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		ret = -EINVAL;		/* Report other errors too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	initialized = ipa->initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	while (initialized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		u32 endpoint_id = __ffs(initialized);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		struct ipa_endpoint *endpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		initialized ^= BIT(endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		/* Make sure it's pointing in the right direction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		endpoint = &ipa->endpoint[endpoint_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 			dev_err(dev, "endpoint id %u wrong direction\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 				endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
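/**
 * ipa_endpoint_deconfig() - Inverse of ipa_endpoint_config()
 * @ipa:	IPA pointer
 */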
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) void ipa_endpoint_deconfig(struct ipa *ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	ipa->available = 0;	/* Nothing more to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
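/**
 * ipa_endpoint_init_one() - Initialize one endpoint from configuration data
 * @ipa:	IPA pointer
 * @name:	Name used to record the endpoint in the name map
 * @data:	Endpoint configuration data
 *
 * Records the endpoint in the channel map (AP endpoints only) and the
 * name map, fills in its fields from the configuration data, and marks
 * it in the IPA initialized bitmap.
 */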
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 				  const struct ipa_gsi_endpoint_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	struct ipa_endpoint *endpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	endpoint = &ipa->endpoint[data->endpoint_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	if (data->ee_id == GSI_EE_AP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		ipa->channel_map[data->channel_id] = endpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	ipa->name_map[name] = endpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	endpoint->ipa = ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	endpoint->ee_id = data->ee_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	endpoint->seq_type = data->endpoint.seq_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	endpoint->channel_id = data->channel_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	endpoint->endpoint_id = data->endpoint_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	endpoint->toward_ipa = data->toward_ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	endpoint->data = &data->endpoint.config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	ipa->initialized |= BIT(endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
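/**
 * ipa_endpoint_exit_one() - Inverse of ipa_endpoint_init_one()
 * @endpoint:	Endpoint to be released
 *
 * Clears the endpoint's bit in the initialized bitmap and zeroes the
 * endpoint structure.
 */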
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	memset(endpoint, 0, sizeof(*endpoint));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
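/**
 * ipa_endpoint_exit() - Inverse of ipa_endpoint_init()
 * @ipa:	IPA pointer
 *
 * Releases all initialized endpoints, highest endpoint ID first, then
 * clears the name and channel maps.
 */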
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) void ipa_endpoint_exit(struct ipa *ipa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	u32 initialized = ipa->initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	while (initialized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		u32 endpoint_id = __fls(initialized);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		initialized ^= BIT(endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	memset(ipa->name_map, 0, sizeof(ipa->name_map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) /* Returns a bitmask of endpoints that support filtering, or 0 on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		      const struct ipa_gsi_endpoint_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	enum ipa_endpoint_name name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	u32 filter_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	if (!ipa_endpoint_data_valid(ipa, count, data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		return 0;	/* Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	ipa->initialized = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	filter_map = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	for (name = 0; name < count; name++, data++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		if (ipa_gsi_endpoint_data_empty(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 			continue;	/* Skip over empty slots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		ipa_endpoint_init_one(ipa, name, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		if (data->endpoint.filter_support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 			filter_map |= BIT(data->endpoint_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	if (!ipa_filter_map_valid(ipa, filter_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		goto err_endpoint_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	return filter_map;	/* Non-zero bitmask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) err_endpoint_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	ipa_endpoint_exit(ipa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	return 0;	/* Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }