Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards. The listing below is net/mac80211/agg-tx.c, the mac80211 TX A-MPDU aggregation code, from this tree.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * HT handling
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2007-2010, Intel Corporation
 * Copyright(c) 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018 - 2022 Intel Corporation
 */

#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "wme.h"

/**
 * DOC: TX A-MPDU aggregation
 *
 * Aggregation on the TX side requires setting the hardware flag
 * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
 * packets with a flag indicating A-MPDU aggregation. The driver
 * or device is responsible for actually aggregating the frames,
 * as well as deciding how many and which to aggregate.
 *
 * When TX aggregation is started by some subsystem (usually the rate
 * control algorithm would be appropriate) by calling the
 * ieee80211_start_tx_ba_session() function, the driver will be
 * notified via its @ampdu_action function, with the
 * %IEEE80211_AMPDU_TX_START action.
 *
 * In response to that, the driver is later required to call the
 * ieee80211_start_tx_ba_cb_irqsafe() function, which will really
 * start the aggregation session after the peer has also responded.
 * If the peer responds negatively, the session will be stopped
 * again right away. Note that it is possible for the aggregation
 * session to be stopped before the driver has indicated that it
 * is done setting it up, in which case it must not indicate the
 * setup completion.
 *
 * Also note that, since we also need to wait for a response from
 * the peer, the driver is notified of the completion of the
 * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
 * @ampdu_action callback.
 *
 * Similarly, when the aggregation session is stopped by the peer
 * or something calling ieee80211_stop_tx_ba_session(), the driver's
 * @ampdu_action function will be called with the action
 * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
 * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
 * Note that the sta can get destroyed before the BA tear down is
 * complete.
 */
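
/*
 * Illustrative sketch (not part of this file): the driver side of the
 * handshake described in the DOC block above. A minimal @ampdu_action
 * handler for the TX start/operational actions could look like this,
 * assuming a hypothetical device that needs no hardware setup before
 * the ADDBA exchange. The stop-side actions are sketched further below,
 * after ___ieee80211_stop_tx_ba_session().
 */
static int example_ampdu_action(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_TX_START:
		/*
		 * Nothing to program, so let mac80211 send the ADDBA
		 * request right away. A driver that must touch the
		 * hardware first would instead return 0 and later call
		 * ieee80211_start_tx_ba_cb_irqsafe(vif, params->sta->addr,
		 * params->tid); returning IEEE80211_AMPDU_TX_START_DELAY_ADDBA
		 * additionally holds off the ADDBA request until that
		 * callback has been made.
		 */
		return IEEE80211_AMPDU_TX_START_IMMEDIATE;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* Peer accepted; params->buf_size and params->amsdu are valid now. */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}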

static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = skb_put_zero(skb, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
	    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	capab = (u16)(1 << 0);		/* bit 0 A-MSDU support */
	capab |= (u16)(1 << 1);		/* bit 1 aggregation policy */
	capab |= (u16)(tid << 2);	/* bit 5:2 TID number */
	capab |= (u16)(agg_size << 6);	/* bit 15:6 max size of aggregation */

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
	mgmt->u.action.u.addba_req.start_seq_num =
					cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb_tid(sdata, skb, tid);
}
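
/*
 * Illustrative helper (not part of this file): the same ADDBA capability
 * encoding as above, written out on its own. For tid = 3 and
 * agg_size = IEEE80211_MAX_AMPDU_BUF_HT (64) it yields
 * 0x0001 | 0x0002 | (3 << 2) | (64 << 6) = 0x100f.
 */
static u16 example_addba_capab(u16 tid, u16 agg_size)
{
	return BIT(0) |			/* A-MSDU supported */
	       BIT(1) |			/* immediate Block Ack policy */
	       (tid << 2) |		/* TID */
	       (agg_size << 6);		/* buffer size */
}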

void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_bar *bar;
	u16 bar_control = 0;

	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	bar = skb_put_zero(skb, sizeof(*bar));
	bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					 IEEE80211_STYPE_BACK_REQ);
	memcpy(bar->ra, ra, ETH_ALEN);
	memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
	bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
	bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
	bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
	bar->control = cpu_to_le16(bar_control);
	bar->start_seq_num = cpu_to_le16(ssn);

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
					IEEE80211_TX_CTL_REQ_TX_STATUS;
	ieee80211_tx_skb_tid(sdata, skb, tid);
}
EXPORT_SYMBOL(ieee80211_send_bar);

void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx)
{
	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}

/*
 * When multiple aggregation sessions on multiple stations
 * are being created/destroyed simultaneously, we need to
 * refcount the global queue stop caused by that in order
 * to not get into a situation where one of the aggregation
 * setup or teardown re-enables queues before the other is
 * ready to handle that.
 *
 * These two functions take care of this issue by keeping
 * a global "agg_queue_stop" refcount.
 */
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	/* we do refcounting here, so don't use the queue reason refcounting */

	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
			false);
	__acquire(agg_queue);
}

static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
		ieee80211_wake_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
			false);
	__release(agg_queue);
}

static void
ieee80211_agg_stop_txq(struct sta_info *sta, int tid)
{
	struct ieee80211_txq *txq = sta->sta.txq[tid];
	struct ieee80211_sub_if_data *sdata;
	struct fq *fq;
	struct txq_info *txqi;

	if (!txq)
		return;

	txqi = to_txq_info(txq);
	sdata = vif_to_sdata(txq->vif);
	fq = &sdata->local->fq;

	/* Lock here to protect against further seqno updates on dequeue */
	spin_lock_bh(&fq->lock);
	set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
	spin_unlock_bh(&fq->lock);
}

static void
ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
{
	struct ieee80211_txq *txq = sta->sta.txq[tid];
	struct txq_info *txqi;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	if (!txq)
		return;

	txqi = to_txq_info(txq);

	if (enable)
		set_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
	else
		clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);

	clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
	local_bh_disable();
	rcu_read_lock();
	schedule_and_wake_txq(sta->sdata->local, txqi);
	rcu_read_unlock();
	local_bh_enable();
}

/*
 * splice packets from the STA's pending to the local pending,
 * requires a call to ieee80211_agg_splice_finish later
 */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
			     struct tid_ampdu_tx *tid_tx, u16 tid)
{
	struct ieee80211_local *local = sdata->local;
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
	unsigned long flags;

	ieee80211_stop_queue_agg(sdata, tid);

	if (WARN(!tid_tx,
		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
		 tid))
		return;

	if (!skb_queue_empty(&tid_tx->pending)) {
		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		/* copy over remaining packets */
		skb_queue_splice_tail_init(&tid_tx->pending,
					   &local->pending[queue]);
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	}
}

static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
{
	ieee80211_wake_queue_agg(sdata, tid);
}

static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * When we get here, the TX path will not be lockless any more wrt.
	 * aggregation, since the OPERATIONAL bit has long been cleared.
	 * Thus it will block on getting the lock, if it occurs. So if we
	 * stop the queue now, we will not get any more packets, and any
	 * that might be being processed will wait for us here, thereby
	 * guaranteeing that no packets go to the tid_tx pending queue any
	 * more.
	 */

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	ieee80211_assign_tid_tx(sta, tid, NULL);

	ieee80211_agg_splice_finish(sta->sdata, tid);

	kfree_rcu(tid_tx, rcu_head);
}

int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
		.ssn = 0,
	};
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	switch (reason) {
	case AGG_STOP_DECLINED:
	case AGG_STOP_LOCAL_REQUEST:
	case AGG_STOP_PEER_REQUEST:
		params.action = IEEE80211_AMPDU_TX_STOP_CONT;
		break;
	case AGG_STOP_DESTROY_STA:
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* free struct pending for start, if present */
	tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
	kfree(tid_tx);
	sta->ampdu_mlme.tid_start_tx[tid] = NULL;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/*
	 * if we're already stopping ignore any new requests to stop
	 * unless we're destroying it in which case notify the driver
	 */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		if (reason != AGG_STOP_DESTROY_STA)
			return -EALREADY;
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
		ret = drv_ampdu_action(local, sta->sdata, &params);
		WARN_ON_ONCE(ret);
		return 0;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	ieee80211_agg_stop_txq(sta, tid);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this, packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX path calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	if (!local->in_reconfig)
		synchronize_net();

	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
					WLAN_BACK_RECIPIENT :
					WLAN_BACK_INITIATOR;
	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;

	ret = drv_ampdu_action(local, sta->sdata, &params);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	/*
	 * In the case of AGG_STOP_DESTROY_STA, the driver won't
	 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
	 * seem like we can leave the tid_tx data pending forever.
	 * This is true, in a way, but "forever" is only until the
	 * station struct is actually destroyed. In the meantime,
	 * leaving it around ensures that we don't transmit packets
	 * to the driver on this TID which might confuse it.
	 */

	return 0;
}
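
/*
 * Illustrative sketch (not part of this file): the driver side of the
 * stop path driven by ___ieee80211_stop_tx_ba_session() above. The
 * hardware teardown itself is hypothetical; the contract is that the
 * call must not fail, and that for IEEE80211_AMPDU_TX_STOP_CONT the
 * driver later reports completion via ieee80211_stop_tx_ba_cb_irqsafe().
 */
static int example_ampdu_tx_stop(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_TX_STOP_CONT:
		/* Stop aggregating but keep the STA; report back. */
		ieee80211_stop_tx_ba_cb_irqsafe(vif, params->sta->addr,
						params->tid);
		return 0;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		/*
		 * The station is going away (AGG_STOP_DESTROY_STA above),
		 * so drop any frames still queued for this TID and return
		 * success; no completion callback is expected here.
		 */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}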

/*
 * After sending an ADDBA request we activate a timer and wait for the
 * ADDBA response to arrive from the recipient.
 * If this timer expires, sta_addba_resp_timer_expired() is executed.
 */
static void sta_addba_resp_timer_expired(struct timer_list *t)
{
	struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer);
	struct sta_info *sta = tid_tx->sta;
	u8 tid = tid_tx->tid;

	/* check if the TID waits for addBA response */
	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "timer expired on %pM tid %d not expecting addBA response\n",
		       sta->sta.addr, tid);
		return;
	}

	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
}

static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
					      struct tid_ampdu_tx *tid_tx)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sta->local;
	u8 tid = tid_tx->tid;
	u16 buf_size;

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	if (sta->sta.he_cap.has_he) {
		buf_size = local->hw.max_tx_aggregation_subframes;
	} else {
		/*
		 * We really should use what the driver told us it will
		 * transmit as the maximum, but certain APs (e.g. the
		 * LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012)
		 * will crash when we use a lower number.
		 */
		buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
	}

	/* send AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, tid_tx->ssn,
				     buf_size, tid_tx->timeout);

	WARN_ON(test_and_set_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state));
}

void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_START,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
	};
	int ret;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Start queuing up packets for this aggregation session.
	 * We're going to release them once the driver is OK with
	 * that.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	ieee80211_agg_stop_txq(sta, tid);

	/*
	 * Make sure no packets are being processed. This ensures that
	 * we have a valid starting sequence number and that in-flight
	 * packets have been flushed out and no packets for this TID
	 * will go into the driver during the ampdu_action call.
	 */
	synchronize_net();

	params.ssn = sta->tid_seq[tid] >> 4;
	ret = drv_ampdu_action(local, sdata, &params);
	tid_tx->ssn = params.ssn;
	if (ret == IEEE80211_AMPDU_TX_START_DELAY_ADDBA) {
		return;
	} else if (ret == IEEE80211_AMPDU_TX_START_IMMEDIATE) {
		/*
		 * We didn't send the request yet, so don't need to check
		 * here if we already got a response, just mark as driver
		 * ready immediately.
		 */
		set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
	} else if (ret) {
		ht_dbg(sdata,
		       "BA request denied - HW unavailable for %pM tid %d\n",
		       sta->sta.addr, tid);
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(sdata, tid);
		spin_unlock_bh(&sta->lock);

		ieee80211_agg_start_txq(sta, tid, false);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	ieee80211_send_addba_with_timeout(sta, tid_tx);
}

/*
 * After accepting the AddBA Response we activated a timer,
 * resetting it after each frame that we send.
 */
static void sta_tx_agg_session_timer_expired(struct timer_list *t)
{
	struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer);
	struct sta_info *sta = tid_tx->sta;
	u8 tid = tid_tx->tid;
	unsigned long timeout;

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		return;
	}

	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&tid_tx->session_timer, timeout);
		return;
	}

	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
}
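
/*
 * Worked example (illustrative, not part of this file): tid_tx->timeout
 * is expressed in 802.11 time units (1 TU = 1024 microseconds), so a
 * negotiated timeout of 5000 TU lets the session survive roughly
 * 5000 * 1024 us ~= 5.12 s of TX inactivity before the timer above tears
 * it down; a timeout of 0 means no inactivity timeout is armed at all.
 */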

int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN(sta->reserved_tid == tid,
		 "Requested to start BA session on reserved tid=%d", tid))
		return -EINVAL;

	if (!pubsta->ht_cap.ht_supported &&
	    sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ)
		return -EINVAL;

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) ||
	    ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	if (test_sta_flag(sta, WLAN_STA_MFP) &&
	    !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
		ht_dbg(sdata,
		       "MFP STA not authorized - deny BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	/*
	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
	 * member of an IBSS, and has no other existing Block Ack agreement
	 * with the recipient STA, then the initiating STA shall transmit a
	 * Probe Request frame to the recipient STA and shall not transmit an
	 * ADDBA Request frame unless it receives a Probe Response frame
	 * from the recipient within dot11ADDBAFailureTimeout.
	 *
	 * The probe request mechanism for ADDBA is currently not implemented,
	 * but we only build up Block Ack session with HT STAs. This information
	 * is set when we receive a bss info from a probe response or a beacon.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * if we have tried more than HT_AGG_BURST_RETRIES times we
	 * will spread our requests in time to avoid stalling connection
	 * for too long
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - %d failed requests on %pM tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check if the TID is not in aggregation flow already */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on %pM tid %u\n",
		       sta->sta.addr, tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;
	tid_tx->sta = sta;
	tid_tx->tid = tid;

	/* response timer */
	timer_setup(&tid_tx->addba_resp_timer, sta_addba_resp_timer_expired, 0);

	/* tx timer */
	timer_setup(&tid_tx->session_timer,
		    sta_tx_agg_session_timer_expired, TIMER_DEFERRABLE);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the work item will
	 * collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
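
/*
 * Illustrative usage (not part of this file): rate control or driver code
 * typically kicks off a session like this once enough traffic has been
 * seen on a TID; the function name here is hypothetical.
 */
static void example_try_start_agg(struct ieee80211_sta *pubsta, u16 tid)
{
	int ret = ieee80211_start_tx_ba_session(pubsta, tid, 0);

	/*
	 * -EBUSY means too many failed attempts or the retry spreading
	 * window is active; -EAGAIN means a session for this TID already
	 * exists or is being set up. Both simply mean "try again later".
	 */
	if (ret && ret != -EBUSY && ret != -EAGAIN)
		pr_debug("BA session start failed: %d\n", ret);
}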
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 					 struct sta_info *sta, u16 tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	struct tid_ampdu_tx *tid_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	struct ieee80211_ampdu_params params = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		.sta = &sta->sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		.action = IEEE80211_AMPDU_TX_OPERATIONAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		.tid = tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		.timeout = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		.ssn = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	lockdep_assert_held(&sta->ampdu_mlme.mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	params.buf_size = tid_tx->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	params.amsdu = tid_tx->amsdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	       sta->sta.addr, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	drv_ampdu_action(local, sta->sdata, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	 * Synchronize with the TX path: while we are splicing, the TX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	 * path must be blocked so it won't put more packets onto pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	spin_lock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	 * Now mark as operational. This will be visible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	 * in the TX path, and lets it go lock-free in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	 * the common case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	ieee80211_agg_splice_finish(sta->sdata, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	spin_unlock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	ieee80211_agg_start_txq(sta, tid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			      struct tid_ampdu_tx *tid_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	struct ieee80211_sub_if_data *sdata = sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (!test_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		ieee80211_send_addba_with_timeout(sta, tid_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		/* RESPONSE_RECEIVED state would trigger the flow again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		ieee80211_agg_tx_operational(local, sta, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
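/*
 * Resolve the station and TX aggregation state for a given RA/TID pair.
 * Must be called under rcu_read_lock(); returns NULL if the TID is out
 * of range, the station is unknown or no addBA was ever requested on
 * this TID.
 */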
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) static struct tid_ampdu_tx *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			const u8 *ra, u16 tid, struct sta_info **sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	struct tid_ampdu_tx *tid_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if (tid >= IEEE80211_NUM_TIDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		       tid, IEEE80211_NUM_TIDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	*sta = sta_info_get_bss(sdata, ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	if (!*sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		ht_dbg(sdata, "Could not find station: %pM\n", ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	if (WARN_ON(!tid_tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		ht_dbg(sdata, "addBA was not requested!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	return tid_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 				      const u8 *ra, u16 tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	struct tid_ampdu_tx *tid_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	trace_api_start_tx_ba_cb(sdata, ra, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (!tid_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
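
/*
 * Usage sketch (assumed driver code, not part of this file): a driver
 * that defers A-MPDU setup from its @ampdu_action callback signals
 * readiness later, e.g. from a work item or completion interrupt:
 *
 *	case IEEE80211_AMPDU_TX_START:
 *		... prepare hardware/firmware state for this sta/tid ...
 *		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 *		return 0;
 *
 * Drivers that need no such preparation can instead return
 * IEEE80211_AMPDU_TX_START_IMMEDIATE from @ampdu_action.
 */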
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 				   enum ieee80211_agg_stop_reason reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	mutex_lock(&sta->ampdu_mlme.mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	mutex_unlock(&sta->ampdu_mlme.mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	struct ieee80211_sub_if_data *sdata = sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	struct tid_ampdu_tx *tid_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	trace_api_stop_tx_ba_session(pubsta, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	if (!local->ops->ampdu_action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if (tid >= IEEE80211_NUM_TIDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	spin_lock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	if (!tid_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	WARN(sta->reserved_tid == tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	     "Requested to stop BA session on reserved tid=%d", tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		/* stop is already in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890)  unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	spin_unlock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
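
/*
 * Usage sketch (assumed caller, not part of this file): a driver or rate
 * control algorithm that no longer wants aggregation on a TID simply
 * requests the stop and lets the aggregation work do the teardown:
 *
 *	ieee80211_stop_tx_ba_session(pubsta, tid);
 *
 * -ENOENT means no session exists; 0 is returned both when the stop was
 * queued and when one is already in progress.
 */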
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
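/*
 * Final step of stopping a TX BA session, run from the aggregation work
 * after the driver has acknowledged the stop: remove the per-TID state,
 * restart the TXQ and, if the teardown was initiated locally, send a
 * DELBA frame to the peer.
 */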
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			     struct tid_ampdu_tx *tid_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	struct ieee80211_sub_if_data *sdata = sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	bool send_delba = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	bool start_txq = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	       sta->sta.addr, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	spin_lock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		ht_dbg(sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		       sta->sta.addr, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		goto unlock_sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		send_delba = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	ieee80211_remove_tid_tx(sta, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	start_txq = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  unlock_sta:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	spin_unlock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	if (start_txq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		ieee80211_agg_start_txq(sta, tid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	if (send_delba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		ieee80211_send_delba(sdata, sta->sta.addr, tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 				     const u8 *ra, u16 tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	struct tid_ampdu_tx *tid_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	trace_api_stop_tx_ba_cb(sdata, ra, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (!tid_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
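
/*
 * Usage sketch (assumed driver code, not part of this file): a driver
 * that handles IEEE80211_AMPDU_TX_STOP_CONT asynchronously calls this
 * once the hardware has flushed the session:
 *
 *	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 */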
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) void ieee80211_process_addba_resp(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 				  struct sta_info *sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 				  struct ieee80211_mgmt *mgmt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 				  size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	struct tid_ampdu_tx *tid_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	struct ieee80211_txq *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	u16 capab, tid, buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	bool amsdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
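	/*
	 * ADDBA Parameter Set field layout: bit 0 is A-MSDU Supported,
	 * bit 1 the Block Ack Policy, bits 2-5 the TID and bits 6-15 the
	 * Buffer Size, hence the shifts below. Clamp the buffer size to
	 * what the local hardware can aggregate.
	 */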
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	txq = sta->sta.txq[tid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	if (!amsdu && txq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	mutex_lock(&sta->ampdu_mlme.mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (!tid_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		       sta->sta.addr, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	del_timer_sync(&tid_tx->addba_resp_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	       sta->sta.addr, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	 * addba_resp_timer may have fired before we got here, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	 * caused WANT_STOP to be set. If the stop then was already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 * processed further, STOPPING might be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		ht_dbg(sta->sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		       "got addBA resp for %pM tid %d but we already gave up\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		       sta->sta.addr, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * IEEE 802.11-2007 7.3.1.14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 * In an ADDBA Response frame, when the Status Code field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	 * is set to 0, the Buffer Size subfield is set to a value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	 * of at least 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			== WLAN_STATUS_SUCCESS && buf_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 				     &tid_tx->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			/* ignore duplicate response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		tid_tx->buf_size = buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		tid_tx->amsdu = amsdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			ieee80211_agg_tx_operational(local, sta, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		sta->ampdu_mlme.addba_req_num[tid] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		tid_tx->timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		if (tid_tx->timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			mod_timer(&tid_tx->session_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 				  TU_TO_EXP_TIME(tid_tx->timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 			tid_tx->last_tx = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	mutex_unlock(&sta->ampdu_mlme.mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }