Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (C) ST-Ericsson AB 2010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Author:  Daniel Martensson
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@lockless.no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #define pr_fmt(fmt) KBUILD_MODNAME fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/if_arp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <net/rtnetlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/pkt_sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <net/caif/caif_layer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <net/caif/caif_hsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) MODULE_AUTHOR("Daniel Martensson");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) MODULE_DESCRIPTION("CAIF HSI driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
/*
 * PAD_POW2(x, pow): number of padding bytes needed to round x up to
 * the next multiple of pow.  pow must be a power of two (a bitmask is
 * used); yields 0 when x is already aligned.
 */
#define PAD_POW2(x, pow) ((((x) & ((pow) - 1)) == 0) ? 0 : \
				((pow) - ((x) & ((pow) - 1))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) 
/* Default link parameters for a CAIF HSI device. */
static const struct cfhsi_config  hsi_default_config = {

	/* Inactivity timeout on HSI.  Added directly to jiffies when the
	 * inactivity timer is armed, so the unit is jiffies (HZ == 1 s),
	 * not milliseconds. */
	.inactivity_timeout = HZ,

	/* Aggregation timeout of zero means no aggregation is done.
	 * NOTE(review): this too is added directly to jiffies when the
	 * aggregation timer is armed, so the unit is jiffies. */
	.aggregation_timeout = 1,

	/*
	 * HSI link layer flow-control thresholds.
	 * Threshold values for the HSI packet queue. Flow-control will be
	 * asserted when the number of packets exceeds q_high_mark. It will
	 * not be de-asserted before the number of packets drops below
	 * q_low_mark.
	 * Warning: A high threshold value might increase throughput but it
	 * will at the same time prevent channel prioritization and increase
	 * the risk of flooding the modem. The high threshold should be above
	 * the low.
	 */
	.q_high_mark = 100,
	.q_low_mark = 50,

	/*
	 * HSI padding options.
	 * Warning: must be a power of 2 (an & mask is used in PAD_POW2)
	 * and can not be zero!
	 */
	.head_align = 4,
	.tail_align = 4,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
/* Argument values passed to the cfdev.flowctrl() callback. */
#define ON 1
#define OFF 0

/* Global list head for cfhsi instances; users are elsewhere in this file. */
static LIST_HEAD(cfhsi_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) static void cfhsi_inactivity_tout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	netdev_dbg(cfhsi->ndev, "%s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 		__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	/* Schedule power down work queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 					   const struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 					   int direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	struct caif_payload_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	int hpad, tpad, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	info = (struct caif_payload_info *)&skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	len = skb->len + hpad + tpad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	if (direction > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 		cfhsi->aggregation_len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	else if (direction < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 		cfhsi->aggregation_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	if (cfhsi->cfg.aggregation_timeout == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 		if (cfhsi->qhead[i].qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	/* TODO: Use aggregation_len instead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 		skb = skb_dequeue(&cfhsi->qhead[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 		if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	int i, len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 		len += skb_queue_len(&cfhsi->qhead[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
/*
 * Drop every queued TX packet, counting each one as both a TX error
 * and a TX drop, then return the TX state machine to idle and re-arm
 * the inactivity timer (unless shutting down).
 *
 * Locking: the loop iteration that finds the queues empty breaks out
 * with cfhsi->lock still held, so the idle-state reset and timer
 * re-arm below run under the lock; the trailing spin_unlock_bh()
 * releases it.
 */
static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		if (!skb)
			break;	/* deliberately leaves the lock held */

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->inactivity_timer,
			jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
/*
 * Drain stale data out of the device-side FIFO.
 *
 * Queries the FIFO occupancy and reads it out in chunks of at most
 * sizeof(buffer) bytes.  Each read completes asynchronously: the
 * completion path clears CFHSI_FLUSH_FIFO and wakes flush_fifo_wait,
 * with a 5 second timeout per chunk.
 *
 * Returns 0 once the FIFO reports empty, or a negative value on a
 * driver error, interrupted wait, or timeout.
 */
static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	do {
		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
				&fifo_occupancy);
		if (ret) {
			netdev_warn(cfhsi->ndev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		/* Read at most one scratch buffer's worth per iteration. */
		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->ops);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			netdev_warn(cfhsi->ndev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		/* ret doubles as the wait timeout (jiffies) and then the
		 * wait result: <0 interrupted, 0 timed out, >0 completed. */
		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			netdev_warn(cfhsi->ndev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			netdev_warn(cfhsi->ndev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
/*
 * Assemble one HSI transfer into @desc from the TX queues.
 *
 * A first frame that fits (including padding) in the descriptor's
 * embedded-frame area is copied there and desc->offset is set; up to
 * CFHSI_MAX_PKTS further frames are appended as payload after the
 * descriptor, each preceded by head padding whose first byte encodes
 * the pad length (hpad - 1).
 *
 * Returns 0 when nothing was queued, otherwise the total transfer
 * size in bytes (CFHSI_DESC_SZ + payload).
 */
static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = cfhsi_dequeue(cfhsi);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		/* hpad includes one byte that carries the pad length. */
		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			/* First padding byte encodes the head-pad length. */
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			spin_lock_bh(&cfhsi->lock);
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;
			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
			spin_unlock_bh(&cfhsi->lock);

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);

			/* Consume the SKB */
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* skb may still hold the frame that was too big to embed. */
		if (!skb)
			skb = cfhsi_dequeue(cfhsi);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	if (cfhsi_can_send_aggregate(cfhsi))
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 
/*
 * Build and submit HSI transfers until the TX queues drain or a
 * transfer is accepted by the underlying driver.
 *
 * When cfhsi_tx_frm() builds nothing, the queue length is re-checked
 * under cfhsi->lock: a packet may have been queued between the build
 * and taking the lock, in which case we loop again instead of going
 * idle.  Otherwise the TX state goes idle and the inactivity timer is
 * armed (both under the lock).
 */
static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
	int len, res;

	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	do {
		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			spin_lock_bh(&cfhsi->lock);
			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
				spin_unlock_bh(&cfhsi->lock);
				/* Lost a race with an enqueue: retry. */
				res = -EAGAIN;
				continue;
			}
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->cfg.inactivity_timeout);
			spin_unlock_bh(&cfhsi->lock);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0))
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
	} while (res < 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) static void cfhsi_tx_done(struct cfhsi *cfhsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	 * Send flow on if flow off has been previously signalled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	 * and number of packets is below low water mark.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	spin_lock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	if (cfhsi->flow_off_sent &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 			cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 			cfhsi->cfdev.flowctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 		cfhsi->flow_off_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	if (cfhsi_can_send_aggregate(cfhsi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 		spin_unlock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 		cfhsi_start_tx(cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		mod_timer(&cfhsi->aggregation_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 			jiffies + cfhsi->cfg.aggregation_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 		spin_unlock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	struct cfhsi *cfhsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	netdev_dbg(cfhsi->ndev, "%s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 		__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	cfhsi_tx_done(cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	int xfer_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	int nfrms = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	u16 *plen = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	u8 *pfrm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	/* Check for embedded CAIF frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	if (desc->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 		struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		int len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		pfrm = ((u8 *)desc) + desc->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		/* Remove offset padding. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		pfrm += *pfrm + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		/* Read length of CAIF frame (little endian). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		len = *pfrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 		len |= ((*(pfrm+1)) << 8) & 0xFF00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		len += 2;	/* Add FCS fields. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		/* Sanity check length of CAIF frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 				__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 			return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		/* Allocate SKB (OK even in IRQ context). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		skb = alloc_skb(len + 1, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 				__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		caif_assert(skb != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		skb_put_data(skb, pfrm, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		skb->protocol = htons(ETH_P_CAIF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		skb->dev = cfhsi->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		netif_rx_any_context(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		/* Update network statistics. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		cfhsi->ndev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		cfhsi->ndev->stats.rx_bytes += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	/* Calculate transfer length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	plen = desc->cffrm_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	while (nfrms < CFHSI_MAX_PKTS && *plen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		xfer_sz += *plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		plen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		nfrms++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	/* Check for piggy-backed descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	if (desc->header & CFHSI_PIGGY_DESC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		xfer_sz += CFHSI_DESC_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		netdev_err(cfhsi->ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 				"%s: Invalid payload len: %d, ignored.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 			__func__, xfer_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	return xfer_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	int xfer_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	int nfrms = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	u16 *plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		pr_err("Invalid descriptor. %x %x\n", desc->header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 				desc->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	/* Calculate transfer length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	plen = desc->cffrm_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	while (nfrms < CFHSI_MAX_PKTS && *plen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		xfer_sz += *plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		plen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		nfrms++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	if (xfer_sz % 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	return xfer_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	int rx_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	int nfrms = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	u16 *plen = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	u8 *pfrm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	/* Sanity check header and offset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	/* Set frame pointer to start of payload. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	plen = desc->cffrm_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	/* Skip already processed frames. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	while (nfrms < cfhsi->rx_state.nfrms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 		pfrm += *plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		rx_sz += *plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		plen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		nfrms++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	/* Parse payload. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	while (nfrms < CFHSI_MAX_PKTS && *plen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		u8 *pcffrm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 		/* CAIF frame starts after head padding. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		pcffrm = pfrm + *pfrm + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		/* Read length of CAIF frame (little endian). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		len = *pcffrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		len += 2;	/* Add FCS fields. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		/* Sanity check length of CAIF frames. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 				__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 			return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 		/* Allocate SKB (OK even in IRQ context). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		skb = alloc_skb(len + 1, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 				__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 			cfhsi->rx_state.nfrms = nfrms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		caif_assert(skb != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 		skb_put_data(skb, pcffrm, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		skb->protocol = htons(ETH_P_CAIF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		skb->dev = cfhsi->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		netif_rx_any_context(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		/* Update network statistics. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		cfhsi->ndev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		cfhsi->ndev->stats.rx_bytes += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		pfrm += *plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		rx_sz += *plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		plen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 		nfrms++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	return rx_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) static void cfhsi_rx_done(struct cfhsi *cfhsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	int desc_pld_len = 0, rx_len, rx_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	struct cfhsi_desc *desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	u8 *rx_ptr, *rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	struct cfhsi_desc *piggy_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	desc = (struct cfhsi_desc *)cfhsi->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	netdev_dbg(cfhsi->ndev, "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	/* Update inactivity timer if pending. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	spin_lock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	mod_timer_pending(&cfhsi->inactivity_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 			jiffies + cfhsi->cfg.inactivity_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	spin_unlock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		desc_pld_len = cfhsi_rx_desc_len(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		if (desc_pld_len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 			goto out_of_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		rx_buf = cfhsi->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		rx_len = desc_pld_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			rx_len += CFHSI_DESC_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		if (desc_pld_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 			rx_buf = cfhsi->rx_flip_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		rx_buf = cfhsi->rx_flip_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		rx_len = CFHSI_DESC_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		if (cfhsi->rx_state.pld_len > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 				(desc->header & CFHSI_PIGGY_DESC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 			piggy_desc = (struct cfhsi_desc *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 						cfhsi->rx_state.pld_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 			cfhsi->rx_state.piggy_desc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 			/* Extract payload len from piggy-backed descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			if (desc_pld_len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 				goto out_of_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 			if (desc_pld_len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 				rx_len = desc_pld_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 				if (piggy_desc->header & CFHSI_PIGGY_DESC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 					rx_len += CFHSI_DESC_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 			 * Copy needed information from the piggy-backed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 			 * descriptor to the descriptor in the start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 			memcpy(rx_buf, (u8 *)piggy_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 					CFHSI_DESC_SHORT_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	if (desc_pld_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		rx_state = CFHSI_RX_STATE_PAYLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		rx_ptr = rx_buf + CFHSI_DESC_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		rx_state = CFHSI_RX_STATE_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		rx_ptr = rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		rx_len = CFHSI_DESC_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	/* Initiate next read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		/* Set up new transfer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 				__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 				cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		if (WARN_ON(res < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 				__func__, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 			cfhsi->ndev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			cfhsi->ndev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		/* Extract payload from descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		if (cfhsi_rx_desc(desc, cfhsi) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			goto out_of_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		/* Extract payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		if (cfhsi_rx_pld(desc, cfhsi) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 			goto out_of_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		if (piggy_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 			/* Extract any payload in piggyback descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 				goto out_of_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 			/* Mark no embedded frame after extracting it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 			piggy_desc->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	/* Update state info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	cfhsi->rx_state.state = rx_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	cfhsi->rx_ptr = rx_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	cfhsi->rx_len = rx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	cfhsi->rx_state.pld_len = desc_pld_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	if (rx_buf != cfhsi->rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) out_of_sync:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			cfhsi->rx_buf, CFHSI_DESC_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	schedule_work(&cfhsi->out_of_sync_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) static void cfhsi_rx_slowpath(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	netdev_dbg(cfhsi->ndev, "%s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	cfhsi_rx_done(cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	struct cfhsi *cfhsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	netdev_dbg(cfhsi->ndev, "%s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		wake_up_interruptible(&cfhsi->flush_fifo_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		cfhsi_rx_done(cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) static void cfhsi_wake_up(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	struct cfhsi *cfhsi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	cfhsi = container_of(work, struct cfhsi, wake_up_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		/* It happenes when wakeup is requested by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		 * both ends at the same time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	/* Activate wake line. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	/* Wait for acknowledge. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	ret = CFHSI_WAKE_TOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 							&cfhsi->bits), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		/* Interrupted by signal. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			__func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	} else if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		bool ca_wake = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		size_t fifo_occupancy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		/* Wakeup timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		/* Check FIFO to check if modem has sent something. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 					&fifo_occupancy));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 				__func__, (unsigned) fifo_occupancy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		/* Check if we misssed the interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 							&ca_wake));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		if (ca_wake) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 				__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			/* Continue execution. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			goto wake_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) wake_ack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	/* Clear power up bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	set_bit(CFHSI_AWAKE, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	/* Resume read operation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	if (WARN_ON(res < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	/* Clear power up acknowledment. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	spin_lock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	/* Resume transmit if queues are not empty. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (!cfhsi_tx_queue_len(cfhsi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		/* Start inactivity timer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		mod_timer(&cfhsi->inactivity_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 				jiffies + cfhsi->cfg.inactivity_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		spin_unlock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	spin_unlock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	/* Create HSI frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	if (likely(len > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		/* Set up new transfer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		if (WARN_ON(res < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 				__func__, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			cfhsi_abort_tx(cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		netdev_err(cfhsi->ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 				"%s: Failed to create HSI frame: %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 				__func__, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) static void cfhsi_wake_down(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	struct cfhsi *cfhsi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	size_t fifo_occupancy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	int retry = CFHSI_WAKE_TOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	cfhsi = container_of(work, struct cfhsi, wake_down_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	/* Deactivate wake line. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	/* Wait for acknowledge. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	ret = CFHSI_WAKE_TOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 							&cfhsi->bits), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		/* Interrupted by signal. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			__func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	} else if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		bool ca_wake = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		/* Timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		/* Check if we misssed the interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 							&ca_wake));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		if (!ca_wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 				__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	/* Check FIFO occupancy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	while (retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 							&fifo_occupancy));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		if (!fifo_occupancy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		schedule_timeout(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	if (!retry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	/* Clear AWAKE condition. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	/* Cancel pending RX requests. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) static void cfhsi_out_of_sync(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	struct cfhsi *cfhsi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	dev_close(cfhsi->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct cfhsi *cfhsi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	netdev_dbg(cfhsi->ndev, "%s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	wake_up_interruptible(&cfhsi->wake_up_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	/* Schedule wake up work queue if the peer initiates. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	struct cfhsi *cfhsi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	netdev_dbg(cfhsi->ndev, "%s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	/* Initiating low power is only permitted by the host (us). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	wake_up_interruptible(&cfhsi->wake_down_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) static void cfhsi_aggregation_tout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	netdev_dbg(cfhsi->ndev, "%s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	cfhsi_start_tx(cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	struct cfhsi *cfhsi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	int start_xfer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	int timer_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	int prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	cfhsi = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	switch (skb->priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	case TC_PRIO_BESTEFFORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	case TC_PRIO_FILLER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	case TC_PRIO_BULK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		prio = CFHSI_PRIO_BEBK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	case TC_PRIO_INTERACTIVE_BULK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		prio = CFHSI_PRIO_VI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	case TC_PRIO_INTERACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		prio = CFHSI_PRIO_VO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	case TC_PRIO_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		prio = CFHSI_PRIO_CTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	spin_lock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	/* Update aggregation statistics  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	cfhsi_update_aggregation_stats(cfhsi, skb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	/* Queue the SKB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	skb_queue_tail(&cfhsi->qhead[prio], skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	/* Sanity check; xmit should not be called after unregister_netdev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		spin_unlock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		cfhsi_abort_tx(cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	/* Send flow off if number of packets is above high water mark. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (!cfhsi->flow_off_sent &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		cfhsi->cfdev.flowctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		cfhsi->flow_off_sent = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		start_xfer = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	if (!start_xfer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		/* Send aggregate if it is possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		bool aggregate_ready =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			cfhsi_can_send_aggregate(cfhsi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			del_timer(&cfhsi->aggregation_timer) > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		spin_unlock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		if (aggregate_ready)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			cfhsi_start_tx(cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	/* Delete inactivity timer if started. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	timer_active = del_timer_sync(&cfhsi->inactivity_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	spin_unlock_bh(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if (timer_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		/* Create HSI frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		len = cfhsi_tx_frm(desc, cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		WARN_ON(!len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		/* Set up new transfer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		if (WARN_ON(res < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 				__func__, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			cfhsi_abort_tx(cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		/* Schedule wake up work queue if the we initiate. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static const struct net_device_ops cfhsi_netdevops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static void cfhsi_setup(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	struct cfhsi *cfhsi = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	dev->features = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	dev->type = ARPHRD_CAIF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	dev->priv_flags |= IFF_NO_QUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	dev->needs_free_netdev = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	dev->netdev_ops = &cfhsi_netdevops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		skb_queue_head_init(&cfhsi->qhead[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	cfhsi->cfdev.use_frag = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	cfhsi->cfdev.use_stx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	cfhsi->cfdev.use_fcs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	cfhsi->ndev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	cfhsi->cfg = hsi_default_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static int cfhsi_open(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	struct cfhsi *cfhsi = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	/* Initialize state vaiables. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	/* Set flow info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	cfhsi->flow_off_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	 * Allocate a TX buffer with the size of a HSI packet descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	 * and the necessary room for CAIF payload frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	if (!cfhsi->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		res = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		goto err_alloc_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	 * Allocate a RX buffer with the size of two HSI packet descriptors and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	 * the necessary room for CAIF payload frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	if (!cfhsi->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		res = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		goto err_alloc_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	if (!cfhsi->rx_flip_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		res = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		goto err_alloc_rx_flip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	/* Initialize aggregation timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	/* Initialize recieve vaiables. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	cfhsi->rx_ptr = cfhsi->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	cfhsi->rx_len = CFHSI_DESC_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	/* Initialize spin locks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	spin_lock_init(&cfhsi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	/* Set up the driver. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	/* Initialize the work queues. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	/* Clear all bit fields. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	/* Create work thread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	if (!cfhsi->wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		res = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		goto err_create_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	/* Initialize wait queues. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	init_waitqueue_head(&cfhsi->wake_up_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	init_waitqueue_head(&cfhsi->wake_down_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	init_waitqueue_head(&cfhsi->flush_fifo_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	/* Setup the inactivity timer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	/* Setup the slowpath RX timer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	/* Setup the aggregation timer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	/* Activate HSI interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	res = cfhsi->ops->cfhsi_up(cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		netdev_err(cfhsi->ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 			"%s: can't activate HSI interface: %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 			__func__, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		goto err_activate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	/* Flush FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	res = cfhsi_flush_fifo(cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			__func__, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		goto err_net_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)  err_net_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	cfhsi->ops->cfhsi_down(cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)  err_activate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	destroy_workqueue(cfhsi->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)  err_create_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	kfree(cfhsi->rx_flip_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)  err_alloc_rx_flip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	kfree(cfhsi->rx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)  err_alloc_rx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	kfree(cfhsi->tx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)  err_alloc_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static int cfhsi_close(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	struct cfhsi *cfhsi = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	u8 *tx_buf, *rx_buf, *flip_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	/* going to shutdown driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	/* Delete timers if pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	del_timer_sync(&cfhsi->inactivity_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	del_timer_sync(&cfhsi->rx_slowpath_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	del_timer_sync(&cfhsi->aggregation_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	/* Cancel pending RX request (if any) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	/* Destroy workqueue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	destroy_workqueue(cfhsi->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	/* Store bufferes: will be freed later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	tx_buf = cfhsi->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	rx_buf = cfhsi->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	flip_buf = cfhsi->rx_flip_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	/* Flush transmit queues. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	cfhsi_abort_tx(cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	/* Deactivate interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	cfhsi->ops->cfhsi_down(cfhsi->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	/* Free buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	kfree(tx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	kfree(rx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	kfree(flip_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static void cfhsi_uninit(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	struct cfhsi *cfhsi = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	symbol_put(cfhsi_get_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	list_del(&cfhsi->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static const struct net_device_ops cfhsi_netdevops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	.ndo_uninit = cfhsi_uninit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	.ndo_open = cfhsi_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	.ndo_stop = cfhsi_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	.ndo_start_xmit = cfhsi_xmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	if (!data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		pr_debug("no params data found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	 * Inactivity timeout in millisecs. Lowest possible value is 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	 * and highest possible is NEXT_TIMER_MAX_DELTA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	if (data[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		u32 inactivity_timeout = nla_get_u32(data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		/* Pre-calculate inactivity timeout. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		cfhsi->cfg.inactivity_timeout =	inactivity_timeout * HZ / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		if (cfhsi->cfg.inactivity_timeout == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 			cfhsi->cfg.inactivity_timeout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 			cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	if (data[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	i = __IFLA_CAIF_HSI_HEAD_ALIGN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	if (data[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		cfhsi->cfg.head_align = nla_get_u32(data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	i = __IFLA_CAIF_HSI_TAIL_ALIGN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	if (data[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		cfhsi->cfg.tail_align = nla_get_u32(data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	if (data[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	if (data[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			       struct nlattr *data[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			       struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	cfhsi_netlink_parms(data, netdev_priv(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	netdev_state_change(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	[__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	[__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	[__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	[__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	[__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	[__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static size_t caif_hsi_get_size(const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	size_t s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		s += nla_total_size(caif_hsi_policy[i].len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	struct cfhsi *cfhsi = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 			cfhsi->cfg.inactivity_timeout) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	    nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			cfhsi->cfg.aggregation_timeout) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			cfhsi->cfg.head_align) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 			cfhsi->cfg.tail_align) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	    nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			cfhsi->cfg.q_high_mark) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	    nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			cfhsi->cfg.q_low_mark))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			    struct nlattr *tb[], struct nlattr *data[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			    struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	struct cfhsi *cfhsi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	struct cfhsi_ops *(*get_ops)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	cfhsi = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	cfhsi_netlink_parms(data, cfhsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	get_ops = symbol_get(cfhsi_get_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	if (!get_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	/* Assign the HSI device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	cfhsi->ops = (*get_ops)();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	if (!cfhsi->ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	/* Assign the driver to this HSI device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	cfhsi->ops->cb_ops = &cfhsi->cb_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	if (register_netdevice(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		pr_warn("%s: caif_hsi device registration failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	/* Add CAIF HSI device to list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	list_add_tail(&cfhsi->list, &cfhsi_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	symbol_put(cfhsi_get_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	.kind		= "cfhsi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	.priv_size	= sizeof(struct cfhsi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	.setup		= cfhsi_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	.maxtype	= __IFLA_CAIF_HSI_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	.policy	= caif_hsi_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	.newlink	= caif_hsi_newlink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	.changelink	= caif_hsi_changelink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	.get_size	= caif_hsi_get_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	.fill_info	= caif_hsi_fill_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) static void __exit cfhsi_exit_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	struct list_head *list_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	struct list_head *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	struct cfhsi *cfhsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	rtnl_link_unregister(&caif_hsi_link_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	list_for_each_safe(list_node, n, &cfhsi_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		cfhsi = list_entry(list_node, struct cfhsi, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		unregister_netdevice(cfhsi->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) static int __init cfhsi_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	return rtnl_link_register(&caif_hsi_link_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) module_init(cfhsi_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) module_exit(cfhsi_exit_module);