Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

net/mac80211/tx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright (C) 2018-2020 Intel Corporation
 *
 * Transmit and frame generation functions.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <asm/unaligned.h>
#include <net/fq_impl.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "wme.h"
#include "rate.h"

/* misc utils */

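/* Bump the netdev's per-CPU software TX counters; the u64_stats_update_*()
 * pair keeps the 64-bit packet/byte counts consistent on 32-bit systems. */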
static inline void ieee80211_tx_stats(struct net_device *dev, u32 len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->tx_packets++;
	tstats->tx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}

static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
				 struct sk_buff *skb, int group_addr,
				 int next_frag_len)
{
	int rate, mrate, erp, dur, i, shift = 0;
	struct ieee80211_rate *txrate;
	struct ieee80211_local *local = tx->local;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_chanctx_conf *chanctx_conf;
	u32 rate_flags = 0;

	/* assume HW handles this */
	if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))
		return 0;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
	if (chanctx_conf) {
		shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
		rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
	}
	rcu_read_unlock();

	/* uh huh? */
	if (WARN_ON_ONCE(tx->rate.idx < 0))
		return 0;

	sband = local->hw.wiphy->bands[info->band];
	txrate = &sband->bitrates[tx->rate.idx];

	erp = txrate->flags & IEEE80211_RATE_ERP_G;

	/* device is expected to do this */
	if (sband->band == NL80211_BAND_S1GHZ)
		return 0;

	/*
	 * data and mgmt (except PS Poll):
	 * - during CFP: 32768
	 * - during contention period:
	 *   if addr1 is group address: 0
	 *   if more fragments = 0 and addr1 is individual address: time to
	 *      transmit one ACK plus SIFS
	 *   if more fragments = 1 and addr1 is individual address: time to
	 *      transmit next fragment plus 2 x ACK plus 3 x SIFS
	 *
	 * IEEE 802.11, 9.6:
	 * - control response frame (CTS or ACK) shall be transmitted using the
	 *   same rate as the immediately previous frame in the frame exchange
	 *   sequence, if this rate belongs to the PHY mandatory rates, or else
	 *   at the highest possible rate belonging to the PHY rates in the
	 *   BSSBasicRateSet
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	if (ieee80211_is_ctl(hdr->frame_control)) {
		/* TODO: These control frames are not currently sent by
		 * mac80211, but should they be implemented, this function
		 * needs to be updated to support duration field calculation.
		 *
		 * RTS: time needed to transmit pending data/mgmt frame plus
		 *    one CTS frame plus one ACK frame plus 3 x SIFS
		 * CTS: duration of immediately previous RTS minus time
		 *    required to transmit CTS and its SIFS
		 * ACK: 0 if immediately previous directed data/mgmt had
		 *    more=0, with more=1 duration in ACK frame is duration
		 *    from previous frame minus time needed to transmit ACK
		 *    and its SIFS
		 * PS Poll: BIT(15) | BIT(14) | aid
		 */
		return 0;
	}

	/* data/mgmt */
	if (0 /* FIX: data/mgmt during CFP */)
		return cpu_to_le16(32768);

	if (group_addr) /* Group address as the destination - no ACK */
		return 0;

	/* Individual destination address:
	 * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
	 * CTS and ACK frames shall be transmitted using the highest rate in
	 * basic rate set that is less than or equal to the rate of the
	 * immediately previous frame and that is using the same modulation
	 * (CCK or OFDM). If no basic rate set matches with these requirements,
	 * the highest mandatory rate of the PHY that is less than or equal to
	 * the rate of the previous frame is used.
	 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
	 */
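	/* The per-band bitrate table is assumed to be sorted in ascending
	 * order (the early break below relies on this): "rate" ends up as the
	 * highest basic rate not above the TX rate, while "mrate" tracks the
	 * highest mandatory rate as a fallback if no basic rate qualifies. */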
	rate = -1;
	/* use lowest available if everything fails */
	mrate = sband->bitrates[0].bitrate;
	for (i = 0; i < sband->n_bitrates; i++) {
		struct ieee80211_rate *r = &sband->bitrates[i];

		if (r->bitrate > txrate->bitrate)
			break;

		if ((rate_flags & r->flags) != rate_flags)
			continue;

		if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
			rate = DIV_ROUND_UP(r->bitrate, 1 << shift);

		switch (sband->band) {
		case NL80211_BAND_2GHZ: {
			u32 flag;
			if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
				flag = IEEE80211_RATE_MANDATORY_G;
			else
				flag = IEEE80211_RATE_MANDATORY_B;
			if (r->flags & flag)
				mrate = r->bitrate;
			break;
		}
		case NL80211_BAND_5GHZ:
		case NL80211_BAND_6GHZ:
			if (r->flags & IEEE80211_RATE_MANDATORY_A)
				mrate = r->bitrate;
			break;
		case NL80211_BAND_S1GHZ:
		case NL80211_BAND_60GHZ:
			/* TODO, for now fall through */
		case NUM_NL80211_BANDS:
			WARN_ON(1);
			break;
		}
	}
	if (rate == -1) {
		/* No matching basic rate found; use highest suitable mandatory
		 * PHY rate */
		rate = DIV_ROUND_UP(mrate, 1 << shift);
	}

	/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
	if (ieee80211_is_data_qos(hdr->frame_control) &&
	    *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
		dur = 0;
	else
		/* Time needed to transmit ACK
		 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
		 * to closest integer */
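		/* e.g. for a 1 Mb/s CCK response rate with long preamble this
		 * is roughly SIFS (10 us) + PLCP preamble/header (192 us) +
		 * 112 bits at 1 Mb/s (112 us) = 314 us */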
		dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
				tx->sdata->vif.bss_conf.use_short_preamble,
				shift);

	if (next_frag_len) {
		/* Frame is fragmented: duration increases with time needed to
		 * transmit next fragment plus ACK and 2 x SIFS. */
		dur *= 2; /* ACK + SIFS */
		/* next fragment */
		dur += ieee80211_frame_duration(sband->band, next_frag_len,
				txrate->bitrate, erp,
				tx->sdata->vif.bss_conf.use_short_preamble,
				shift);
	}

	return cpu_to_le16(dur);
}

/* tx handlers */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
{
	struct ieee80211_local *local = tx->local;
	struct ieee80211_if_managed *ifmgd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

	/* driver doesn't support power save */
	if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
		return TX_CONTINUE;

	/* hardware does dynamic power save */
	if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
		return TX_CONTINUE;

	/* dynamic power save disabled */
	if (local->hw.conf.dynamic_ps_timeout <= 0)
		return TX_CONTINUE;

	/* we are scanning, don't enable power save */
	if (local->scanning)
		return TX_CONTINUE;

	if (!local->ps_sdata)
		return TX_CONTINUE;

	/* No point if we're going to suspend */
	if (local->quiescing)
		return TX_CONTINUE;

	/* dynamic ps is supported only in managed mode */
	if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
		return TX_CONTINUE;

	if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
		return TX_CONTINUE;

	ifmgd = &tx->sdata->u.mgd;

	/*
	 * Don't wake up from power save if u-apsd is enabled, voip ac has
	 * u-apsd enabled and the frame is in voip class. This effectively
	 * means that even if all access categories have u-apsd enabled, in
	 * practice u-apsd is only used with the voip ac. This is a
	 * workaround for the case when received voip class packets do not
	 * have correct qos tag for some reason, due to the network or the
	 * peer application.
	 *
	 * Note: ifmgd->uapsd_queues access is racy here. If the value is
	 * changed via debugfs, user needs to reassociate manually to have
	 * everything in sync.
	 */
	if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) &&
	    (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) &&
	    skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
		return TX_CONTINUE;

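	/* The hardware is currently in power save: stop the TX queues and let
	 * dynamic_ps_disable_work bring it back up before this frame goes out. */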
	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
		ieee80211_stop_queues_by_reason(&local->hw,
						IEEE80211_MAX_QUEUE_MAP,
						IEEE80211_QUEUE_STOP_REASON_PS,
						false);
		ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
		ieee80211_queue_work(&local->hw,
				     &local->dynamic_ps_disable_work);
	}

	/* Don't restart the timer if we're not disassociated */
	if (!ifmgd->associated)
		return TX_CONTINUE;

	mod_timer(&local->dynamic_ps_timer, jiffies +
		  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
{

	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	bool assoc = false;

	if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
		return TX_CONTINUE;

	if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
	    test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
	    !ieee80211_is_probe_req(hdr->frame_control) &&
	    !ieee80211_is_any_nullfunc(hdr->frame_control))
		/*
		 * When software scanning only nullfunc frames (to notify
		 * the sleep state to the AP) and probe requests (for the
		 * active scan) are allowed, all other frames should not be
		 * sent and we should not get here, but if we do
		 * nonetheless, drop them to avoid sending them
		 * off-channel. See the link below and
		 * ieee80211_start_scan() for more.
		 *
		 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
		 */
		return TX_DROP;

	if (tx->sdata->vif.type == NL80211_IFTYPE_OCB)
		return TX_CONTINUE;

	if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
		return TX_CONTINUE;

	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
		return TX_CONTINUE;

	if (tx->sta)
		assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);

	if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
		if (unlikely(!assoc &&
			     ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
			sdata_info(tx->sdata,
				   "dropped data frame to not associated station %pM\n",
				   hdr->addr1);
#endif
			I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
			return TX_DROP;
		}
	} else if (unlikely(ieee80211_is_data(hdr->frame_control) &&
			    ieee80211_vif_get_num_mcast_if(tx->sdata) == 0)) {
		/*
		 * No associated STAs - no need to send multicast
		 * frames.
		 */
		return TX_DROP;
	}

	return TX_CONTINUE;
}

/* This function is called whenever the AP is about to exceed the maximum limit
 * of buffered frames for power saving STAs. This situation should not really
 * happen often during normal operation, so dropping the oldest buffered packet
 * from each queue should be OK to make some room for new frames. */
static void purge_old_ps_buffers(struct ieee80211_local *local)
{
	int total = 0, purged = 0;
	struct sk_buff *skb;
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		struct ps_data *ps;

		if (sdata->vif.type == NL80211_IFTYPE_AP)
			ps = &sdata->u.ap.ps;
		else if (ieee80211_vif_is_mesh(&sdata->vif))
			ps = &sdata->u.mesh.ps;
		else
			continue;

		skb = skb_dequeue(&ps->bc_buf);
		if (skb) {
			purged++;
			ieee80211_free_txskb(&local->hw, skb);
		}
		total += skb_queue_len(&ps->bc_buf);
	}

	/*
	 * Drop one frame from each station from the lowest-priority
	 * AC that has frames at all.
	 */
	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		int ac;

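		/* IEEE80211_AC_VO is 0 and IEEE80211_AC_BK is 3, so counting
		 * down walks the ACs from lowest to highest priority */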
		for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
			skb = skb_dequeue(&sta->ps_tx_buf[ac]);
			total += skb_queue_len(&sta->ps_tx_buf[ac]);
			if (skb) {
				purged++;
				ieee80211_free_txskb(&local->hw, skb);
				break;
			}
		}
	}

	local->total_ps_buffered = total;
	ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
}

static ieee80211_tx_result
ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ps_data *ps;

	/*
	 * broadcast/multicast frame
	 *
	 * If any of the associated/peer stations is in power save mode,
	 * the frame is buffered to be sent after DTIM beacon frame.
	 * This is done either by the hardware or us.
	 */

	/* powersaving STAs currently only in AP/VLAN/mesh mode */
	if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
	    tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		if (!tx->sdata->bss)
			return TX_CONTINUE;

		ps = &tx->sdata->bss->ps;
	} else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) {
		ps = &tx->sdata->u.mesh.ps;
	} else {
		return TX_CONTINUE;
	}


	/* no buffering for ordered frames */
	if (ieee80211_has_order(hdr->frame_control))
		return TX_CONTINUE;

	if (ieee80211_is_probe_req(hdr->frame_control))
		return TX_CONTINUE;

	if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
		info->hw_queue = tx->sdata->vif.cab_queue;

	/* no stations in PS mode and no buffered packets */
	if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
		return TX_CONTINUE;

	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;

	/* device releases frame after DTIM beacon */
	if (!ieee80211_hw_check(&tx->local->hw, HOST_BROADCAST_PS_BUFFERING))
		return TX_CONTINUE;

	/* buffered in mac80211 */
	if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
		purge_old_ps_buffers(tx->local);

	if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
		ps_dbg(tx->sdata,
		       "BC TX buffer full - dropping the oldest frame\n");
		ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
	} else
		tx->local->total_ps_buffered++;

	skb_queue_tail(&ps->bc_buf, tx->skb);

	return TX_QUEUED;
}

static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
			     struct sk_buff *skb)
{
	if (!ieee80211_is_mgmt(fc))
		return 0;

	if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
		return 0;

	if (!ieee80211_is_robust_mgmt_frame(skb))
		return 0;

	return 1;
}

static ieee80211_tx_result
ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct sta_info *sta = tx->sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ieee80211_local *local = tx->local;

	if (unlikely(!sta))
		return TX_CONTINUE;

	if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
		      test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
		      test_sta_flag(sta, WLAN_STA_PS_DELIVER)) &&
		     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
		int ac = skb_get_queue_mapping(tx->skb);

		if (ieee80211_is_mgmt(hdr->frame_control) &&
		    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
			return TX_CONTINUE;
		}

		ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
		       sta->sta.addr, sta->sta.aid, ac);
		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
			purge_old_ps_buffers(tx->local);

		/* sync with ieee80211_sta_ps_deliver_wakeup */
		spin_lock(&sta->ps_lock);
		/*
		 * STA woke up in the meantime and all the frames on ps_tx_buf have
		 * been queued to pending queue. No reordering can happen, go
		 * ahead and Tx the packet.
		 */
		if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
		    !test_sta_flag(sta, WLAN_STA_PS_DRIVER) &&
		    !test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
			spin_unlock(&sta->ps_lock);
			return TX_CONTINUE;
		}

		if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
			struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
			ps_dbg(tx->sdata,
			       "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
			       sta->sta.addr, ac);
			ieee80211_free_txskb(&local->hw, old);
		} else
			tx->local->total_ps_buffered++;

		info->control.jiffies = jiffies;
		info->control.vif = &tx->sdata->vif;
		info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
		info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
		skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
		spin_unlock(&sta->ps_lock);

		if (!timer_pending(&local->sta_cleanup))
			mod_timer(&local->sta_cleanup,
				  round_jiffies(jiffies +
						STA_INFO_CLEANUP_INTERVAL));

		/*
		 * We queued up some frames, so the TIM bit might
		 * need to be set, recalculate it.
		 */
		sta_info_recalc_tim(sta);

		return TX_QUEUED;
	} else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
		ps_dbg(tx->sdata,
		       "STA %pM in PS mode, but polling/in SP -> send frame\n",
		       sta->sta.addr);
	}

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
{
	if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
		return TX_CONTINUE;

	if (tx->flags & IEEE80211_TX_UNICAST)
		return ieee80211_tx_h_unicast_ps_buf(tx);
	else
		return ieee80211_tx_h_multicast_ps_buf(tx);
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

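	/* The control port protocol is normally EAPOL (ETH_P_PAE); such
	 * frames may have to go out unencrypted during the initial handshake
	 * and are always sent at a robust minimum rate. */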
	if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) {
		if (tx->sdata->control_port_no_encrypt)
			info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
		info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
		info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
	}

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
{
	struct ieee80211_key *key;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;

	if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
		tx->key = NULL;
		return TX_CONTINUE;
	}

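	/* Key precedence: a station's pairwise key always wins; group Privacy
	 * Action frames and robust multicast management frames use the default
	 * multicast/management key; everything else falls back to the default
	 * multicast or unicast key. */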
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	if (tx->sta &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	    (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		tx->key = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	else if (ieee80211_is_group_privacy_action(tx->skb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		(key = rcu_dereference(tx->sdata->default_multicast_key)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		tx->key = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	else if (ieee80211_is_mgmt(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		 is_multicast_ether_addr(hdr->addr1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		 ieee80211_is_robust_mgmt_frame(tx->skb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		 (key = rcu_dereference(tx->sdata->default_mgmt_key)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		tx->key = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	else if (is_multicast_ether_addr(hdr->addr1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		 (key = rcu_dereference(tx->sdata->default_multicast_key)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		tx->key = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	else if (!is_multicast_ether_addr(hdr->addr1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		 (key = rcu_dereference(tx->sdata->default_unicast_key)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		tx->key = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		tx->key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	if (tx->key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		bool skip_hw = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		/* TODO: add threshold stuff again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		switch (tx->key->conf.cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		case WLAN_CIPHER_SUITE_WEP40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		case WLAN_CIPHER_SUITE_WEP104:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		case WLAN_CIPHER_SUITE_TKIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 			if (!ieee80211_is_data_present(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 				tx->key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		case WLAN_CIPHER_SUITE_CCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		case WLAN_CIPHER_SUITE_CCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		case WLAN_CIPHER_SUITE_GCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		case WLAN_CIPHER_SUITE_GCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 			if (!ieee80211_is_data_present(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 			    !ieee80211_use_mfp(hdr->frame_control, tx->sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 					       tx->skb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 			    !ieee80211_is_group_privacy_action(tx->skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 				tx->key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 				skip_hw = (tx->key->conf.flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 					   IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 					ieee80211_is_mgmt(hdr->frame_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		case WLAN_CIPHER_SUITE_AES_CMAC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		case WLAN_CIPHER_SUITE_BIP_CMAC_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 			if (!ieee80211_is_mgmt(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 				tx->key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 			     !ieee80211_is_deauth(hdr->frame_control)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 			return TX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		if (!skip_hw && tx->key &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		    tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			info->control.hw_key = &tx->key->conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	} else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		   test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		return TX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) static ieee80211_tx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	struct ieee80211_hdr *hdr = (void *)tx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	struct ieee80211_supported_band *sband;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	struct ieee80211_tx_rate_control txrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	struct ieee80211_sta_rates *ratetbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	bool assoc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	memset(&txrc, 0, sizeof(txrc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	sband = tx->local->hw.wiphy->bands[info->band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	len = min_t(u32, tx->skb->len + FCS_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 			 tx->local->hw.wiphy->frag_threshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	/* set up the tx rate control struct we give the RC algo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	txrc.hw = &tx->local->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	txrc.sband = sband;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	txrc.bss_conf = &tx->sdata->vif.bss_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	txrc.skb = tx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	txrc.reported_rate.idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (tx->sdata->rc_has_mcs_mask[info->band])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		txrc.rate_idx_mcs_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 			tx->sdata->rc_rateidx_mcs_mask[info->band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		    tx->sdata->vif.type == NL80211_IFTYPE_OCB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	/* set up RTS protection if desired */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	if (len > tx->local->hw.wiphy->rts_threshold) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		txrc.rts = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	info->control.use_rts = txrc.rts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	info->control.use_cts_prot = tx->sdata->vif.bss_conf.use_cts_prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	 * Use short preamble if the BSS can handle it, but not for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	 * management frames unless we know the receiver can handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	 * that -- the management frame might be to a station that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	 * just wants a probe response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	if (tx->sdata->vif.bss_conf.use_short_preamble &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	    (ieee80211_is_data(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	     (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		txrc.short_preamble = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	info->control.short_preamble = txrc.short_preamble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	/* don't ask rate control when rate already injected via radiotap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	if (tx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	 * Let's not bother rate control if we're associated and cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	 * talk to the sta. This should not happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		 !rate_usable_index_exists(sband, &tx->sta->sta),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		 "%s: Dropped data frame as no usable bitrate found while "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		 "scanning and associated. Target station: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		 "%pM on %d GHz band\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		 tx->sdata->name, hdr->addr1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		 info->band ? 5 : 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		return TX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	 * If we're associated with the sta at this point we know we can at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	 * least send the frame at the lowest bit rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	rate_control_get_rate(tx->sdata, tx->sta, &txrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	if (tx->sta && !info->control.skip_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		ratetbl = rcu_dereference(tx->sta->sta.rates);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	if (unlikely(info->control.rates[0].idx < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		if (ratetbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			struct ieee80211_tx_rate rate = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 				.idx = ratetbl->rate[0].idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 				.flags = ratetbl->rate[0].flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 				.count = ratetbl->rate[0].count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 			};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			if (ratetbl->rate[0].idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 				return TX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			tx->rate = rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			return TX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		tx->rate = info->control.rates[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	if (txrc.reported_rate.idx < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		txrc.reported_rate = tx->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		if (tx->sta && ieee80211_is_data(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			tx->sta->tx_stats.last_rate = txrc.reported_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	} else if (tx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		tx->sta->tx_stats.last_rate = txrc.reported_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	if (ratetbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	if (unlikely(!info->control.rates[0].count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		info->control.rates[0].count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			 (info->flags & IEEE80211_TX_CTL_NO_ACK)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		info->control.rates[0].count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	u16 *seq = &sta->tid_seq[tid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	__le16 ret = cpu_to_le16(*seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	/* Increase the sequence number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	*seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) }
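/*
 * For illustration: the low four bits of seq_ctrl carry the fragment
 * number (IEEE80211_SCTL_FRAG) and bits 4-15 carry the sequence number
 * (IEEE80211_SCTL_SEQ == 0xfff0), so adding 0x10 advances the sequence
 * number by one while leaving the fragment field clear, and the mask
 * wraps it after 4095, e.g. 0x0ff0 -> 0x1000 and 0xfff0 -> 0x0000.
 */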
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) static ieee80211_tx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	int tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	 * Packet injection may want to control the sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	 * number; if we have no matching interface then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	 * neither assign one ourselves nor ask the driver to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	if (ieee80211_hdrlen(hdr->frame_control) < 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if (info->control.flags & IEEE80211_TX_CTRL_NO_SEQNO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	 * Anything but QoS data that has a sequence number field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	 * (i.e. is long enough) gets a sequence number from the global
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	 * counter.  QoS data frames with a multicast destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	 * also use the global counter (802.11-2012 9.3.2.10).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	if (!ieee80211_is_data_qos(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	    is_multicast_ether_addr(hdr->addr1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		/* driver should assign sequence number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		/* for pure STA mode without beacons, we can do it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		tx->sdata->sequence_number += 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		if (tx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			tx->sta->tx_stats.msdu[IEEE80211_NUM_TIDS]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	 * This should be true for injected/management frames only, for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	 * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	 * above since they are not QoS-data frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (!tx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	/* include per-STA, per-TID sequence counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	tid = ieee80211_get_tid(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	tx->sta->tx_stats.msdu[tid]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) static int ieee80211_fragment(struct ieee80211_tx_data *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			      struct sk_buff *skb, int hdrlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			      int frag_threshold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	struct ieee80211_local *local = tx->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	struct ieee80211_tx_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	struct sk_buff *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	int per_fragm = frag_threshold - hdrlen - FCS_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	int pos = hdrlen + per_fragm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	int rem = skb->len - hdrlen - per_fragm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (WARN_ON(rem < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	/* first fragment was already added to queue by caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	while (rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		int fraglen = per_fragm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		if (fraglen > rem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			fraglen = rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		rem -= fraglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		tmp = dev_alloc_skb(local->tx_headroom +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 				    frag_threshold +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 				    tx->sdata->encrypt_headroom +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 				    IEEE80211_ENCRYPT_TAILROOM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		__skb_queue_tail(&tx->skbs, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		skb_reserve(tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			    local->tx_headroom + tx->sdata->encrypt_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		/* copy control information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		info = IEEE80211_SKB_CB(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 				 IEEE80211_TX_CTL_FIRST_FRAGMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		if (rem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 			info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		skb_copy_queue_mapping(tmp, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		tmp->priority = skb->priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		tmp->dev = skb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		/* copy header and data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		skb_put_data(tmp, skb->data, hdrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		skb_put_data(tmp, skb->data + pos, fraglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		pos += fraglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	/* adjust first fragment's length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	skb_trim(skb, hdrlen + per_fragm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) }
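/*
 * Worked example (assuming a plain data frame with a 24-byte header,
 * FCS_LEN == 4 and frag_threshold == 256): per_fragm = 256 - 24 - 4 =
 * 228 bytes of payload per fragment.  A 1024-byte skb (24 header +
 * 1000 payload) thus ends up as the original skb trimmed to 24 + 228
 * bytes plus four allocated fragments carrying 228, 228, 228 and 88
 * payload bytes.
 */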
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) static ieee80211_tx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	struct sk_buff *skb = tx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	struct ieee80211_hdr *hdr = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	int frag_threshold = tx->local->hw.wiphy->frag_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	int hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	int fragnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	/* no matter what happens, tx->skb moves to tx->skbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	__skb_queue_tail(&tx->skbs, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	tx->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	 * Warn when submitting a fragmented A-MPDU frame and drop it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	 * This scenario is handled in ieee80211_tx_prepare, but extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	 * caution is taken here as a fragmented A-MPDU may cause Tx stop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		return TX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	/* internal error, why isn't DONTFRAG set? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		return TX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	 * Now fragment the frame. This will allocate all the fragments and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	 * queue them (using skb as the first fragment) onto tx->skbs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	 * During transmission, we will remove the successfully transmitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	 * fragments from this list. When the low-level driver rejects one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 * of the fragments then we will simply pretend to accept the skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 * but store it away as pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		return TX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	/* update duration/seq/flags of fragments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	fragnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	skb_queue_walk(&tx->skbs, skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		hdr = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		if (!skb_queue_is_last(&tx->skbs, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			hdr->frame_control |= morefrags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			 * No multi-rate retries for fragmented frames, that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			 * would completely throw off the NAV at other STAs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			info->control.rates[1].idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			info->control.rates[2].idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			info->control.rates[3].idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 			info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			hdr->frame_control &= ~morefrags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		fragnum++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static ieee80211_tx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	int ac = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	if (!tx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	skb_queue_walk(&tx->skbs, skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		ac = skb_get_queue_mapping(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		tx->sta->tx_stats.bytes[ac] += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	if (ac >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		tx->sta->tx_stats.packets[ac]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static ieee80211_tx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	if (!tx->key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	switch (tx->key->conf.cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	case WLAN_CIPHER_SUITE_WEP40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	case WLAN_CIPHER_SUITE_WEP104:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		return ieee80211_crypto_wep_encrypt(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	case WLAN_CIPHER_SUITE_TKIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		return ieee80211_crypto_tkip_encrypt(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	case WLAN_CIPHER_SUITE_CCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		return ieee80211_crypto_ccmp_encrypt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			tx, IEEE80211_CCMP_MIC_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	case WLAN_CIPHER_SUITE_CCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		return ieee80211_crypto_ccmp_encrypt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			tx, IEEE80211_CCMP_256_MIC_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	case WLAN_CIPHER_SUITE_AES_CMAC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		return ieee80211_crypto_aes_cmac_encrypt(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		return ieee80211_crypto_aes_cmac_256_encrypt(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		return ieee80211_crypto_aes_gmac_encrypt(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	case WLAN_CIPHER_SUITE_GCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	case WLAN_CIPHER_SUITE_GCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		return ieee80211_crypto_gcmp_encrypt(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		return ieee80211_crypto_hw_encrypt(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	return TX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static ieee80211_tx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	int next_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	bool group_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	skb_queue_walk(&tx->skbs, skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		hdr = (void *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			break; /* must not overwrite AID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		if (!skb_queue_is_last(&tx->skbs, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			next_len = next->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			next_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		group_addr = is_multicast_ether_addr(hdr->addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		hdr->duration_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			ieee80211_duration(tx, skb, group_addr, next_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /* actual transmit path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 				  struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 				  struct ieee80211_tx_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 				  struct tid_ampdu_tx *tid_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 				  int tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	bool queued = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	bool reset_agg_timer = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	struct sk_buff *purge_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		info->flags |= IEEE80211_TX_CTL_AMPDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		reset_agg_timer = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		 * nothing -- this aggregation session is being started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		 * but that might still fail with the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	} else if (!tx->sta->sta.txq[tid]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		spin_lock(&tx->sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		 * Need to re-check now, because we may get here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		 *  1) in the window during which the setup is actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		 *     already done, but not marked yet because not all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		 *     packets are spliced over to the driver pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		 *     queue yet -- if this happened we acquire the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		 *     either before or after the splice happens, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		 *     need to recheck which of these cases happened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		 *  2) during session teardown, if the OPERATIONAL bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		 *     was cleared due to the teardown but the pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		 *     hasn't been assigned NULL yet (or we loaded it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		 *     before it was assigned) -- in this case it may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		 *     now be NULL which means we should just let the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		 *     packet pass through because splicing the frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		 *     back is already done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		if (!tid_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			/* do nothing, let packet pass through */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 			info->flags |= IEEE80211_TX_CTL_AMPDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			reset_agg_timer = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 			queued = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 			if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 				clear_sta_flag(tx->sta, WLAN_STA_SP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 				ps_dbg(tx->sta->sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 				       "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				       tx->sta->sta.addr, tx->sta->sta.aid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			info->control.vif = &tx->sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 			__skb_queue_tail(&tid_tx->pending, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 				purge_skb = __skb_dequeue(&tid_tx->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		spin_unlock(&tx->sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		if (purge_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			ieee80211_free_txskb(&tx->local->hw, purge_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	/* reset session timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	if (reset_agg_timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		tid_tx->last_tx = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	return queued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)  * initialises @tx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)  * pass %NULL for the station if unknown, a valid pointer if known
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  * or an ERR_PTR() if the station is known not to exist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static ieee80211_tx_result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		     struct ieee80211_tx_data *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		     struct sta_info *sta, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	int tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	memset(tx, 0, sizeof(*tx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	tx->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	tx->local = local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	tx->sdata = sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	__skb_queue_head_init(&tx->skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	 * If this flag is set to true anywhere, and we get here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	 * we are doing the needed processing, so remove the flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	 * now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	info->control.flags &= ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	if (likely(sta)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		if (!IS_ERR(sta))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 			tx->sta = sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 			tx->sta = rcu_dereference(sdata->u.vlan.sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			if (!tx->sta && sdata->wdev.use_4addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 				return TX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		} else if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 					  IEEE80211_TX_CTL_INJECTED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			   tx->sdata->control_port_protocol == tx->skb->protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			tx->sta = sta_info_get_bss(sdata, hdr->addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		if (!tx->sta && !is_multicast_ether_addr(hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			tx->sta = sta_info_get(sdata, hdr->addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	    !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	    ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	    !ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		struct tid_ampdu_tx *tid_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		tid = ieee80211_get_tid(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		if (tid_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			bool queued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			queued = ieee80211_tx_prep_agg(tx, skb, info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 						       tid_tx, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 			if (unlikely(queued))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 				return TX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	if (is_multicast_ether_addr(hdr->addr1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		tx->flags &= ~IEEE80211_TX_UNICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		info->flags |= IEEE80211_TX_CTL_NO_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		tx->flags |= IEEE80211_TX_UNICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		if (!(tx->flags & IEEE80211_TX_UNICAST) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		    skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		    info->flags & IEEE80211_TX_CTL_AMPDU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 			info->flags |= IEEE80211_TX_CTL_DONTFRAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	if (!tx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		ieee80211_check_fast_xmit(tx->sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	return TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 					  struct ieee80211_vif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 					  struct sta_info *sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 					  struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	struct ieee80211_txq *txq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	    unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		if ((!ieee80211_is_mgmt(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		     ieee80211_is_bufferable_mmpdu(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		     vif->type == NL80211_IFTYPE_STATION) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		    sta && sta->uploaded) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			 * This will be NULL if the driver didn't set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 			 * opt-in hardware flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			txq = sta->sta.txq[IEEE80211_NUM_TIDS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	} else if (sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		if (!sta->uploaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		txq = sta->sta.txq[tid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	} else if (vif) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		txq = vif->txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	if (!txq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	return to_txq_info(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
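/*
 * A NULL return here means the caller transmits the frame directly
 * rather than via an intermediate TXQ, e.g. frames buffered for
 * delivery after DTIM or sent as power-save poll responses.
 */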
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static u32 codel_skb_len_func(const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) static codel_time_t codel_skb_time_func(const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	const struct ieee80211_tx_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	info = (const struct ieee80211_tx_info *)skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	return info->control.enqueue_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static struct sk_buff *codel_dequeue_func(struct codel_vars *cvars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 					  void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	struct ieee80211_local *local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	struct txq_info *txqi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	struct fq *fq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	struct fq_flow *flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	txqi = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	local = vif_to_sdata(txqi->txq.vif)->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	fq = &local->fq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	if (cvars == &txqi->def_cvars)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		flow = &txqi->def_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		flow = &fq->flows[cvars - local->cvars];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	return fq_flow_dequeue(fq, flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static void codel_drop_func(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			    void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	struct ieee80211_local *local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	struct ieee80211_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	struct txq_info *txqi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	txqi = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	local = vif_to_sdata(txqi->txq.vif)->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	hw = &local->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	ieee80211_free_txskb(hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 					   struct fq_tin *tin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 					   struct fq_flow *flow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	struct ieee80211_local *local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	struct txq_info *txqi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	struct codel_vars *cvars;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	struct codel_params *cparams;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	struct codel_stats *cstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	local = container_of(fq, struct ieee80211_local, fq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	txqi = container_of(tin, struct txq_info, tin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	cstats = &txqi->cstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	if (txqi->txq.sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		struct sta_info *sta = container_of(txqi->txq.sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 						    struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		cparams = &sta->cparams;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		cparams = &local->cparams;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	if (flow == &txqi->def_flow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		cvars = &txqi->def_cvars;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		cvars = &local->cvars[flow - fq->flows];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	return codel_dequeue(txqi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			     &flow->backlog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			     cparams,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 			     cvars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			     cstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			     codel_skb_len_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			     codel_skb_time_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			     codel_drop_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			     codel_dequeue_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
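/*
 * Note the symmetry with codel_dequeue_func() above: the per-TXQ
 * default flow pairs with txqi->def_cvars, while shared flow i
 * (fq->flows[i]) pairs with local->cvars[i], so CoDel state can be
 * recovered from a flow pointer (and vice versa) by pointer arithmetic.
 */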
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static void fq_skb_free_func(struct fq *fq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			     struct fq_tin *tin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 			     struct fq_flow *flow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 			     struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	struct ieee80211_local *local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	local = container_of(fq, struct ieee80211_local, fq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	ieee80211_free_txskb(&local->hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static struct fq_flow *fq_flow_get_default_func(struct fq *fq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 						struct fq_tin *tin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 						int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 						struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	struct txq_info *txqi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	txqi = container_of(tin, struct txq_info, tin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	return &txqi->def_flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static void ieee80211_txq_enqueue(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 				  struct txq_info *txqi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 				  struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	struct fq *fq = &local->fq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	struct fq_tin *tin = &txqi->tin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	u32 flow_idx = fq_flow_idx(fq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	ieee80211_set_skb_enqueue_time(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	spin_lock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	fq_tin_enqueue(fq, tin, flow_idx, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		       fq_skb_free_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		       fq_flow_get_default_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	spin_unlock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) static bool fq_vlan_filter_func(struct fq *fq, struct fq_tin *tin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 				struct fq_flow *flow, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 				void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	return info->control.vif == data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) void ieee80211_txq_remove_vlan(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 			       struct ieee80211_sub_if_data *sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	struct fq *fq = &local->fq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	struct txq_info *txqi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	struct fq_tin *tin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	struct ieee80211_sub_if_data *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (!ap->vif.txq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	txqi = to_txq_info(ap->vif.txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	tin = &txqi->tin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	spin_lock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	fq_tin_filter(fq, tin, fq_vlan_filter_func, &sdata->vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		      fq_skb_free_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	spin_unlock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 			struct sta_info *sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 			struct txq_info *txqi, int tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	fq_tin_init(&txqi->tin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	fq_flow_init(&txqi->def_flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	codel_vars_init(&txqi->def_cvars);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	codel_stats_init(&txqi->cstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	__skb_queue_head_init(&txqi->frags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	INIT_LIST_HEAD(&txqi->schedule_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	txqi->txq.vif = &sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	if (!sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		sdata->vif.txq = &txqi->txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		txqi->txq.tid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		txqi->txq.ac = IEEE80211_AC_BE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	if (tid == IEEE80211_NUM_TIDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 			/* Drivers need to opt in to the management MPDU TXQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			if (!ieee80211_hw_check(&sdata->local->hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 						STA_MMPDU_TXQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		} else if (!ieee80211_hw_check(&sdata->local->hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 					       BUFF_MMPDU_TXQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			/* Drivers need to opt in to the bufferable MMPDU TXQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		txqi->txq.ac = IEEE80211_AC_VO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		txqi->txq.ac = ieee80211_ac_from_tid(tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	txqi->txq.sta = &sta->sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	txqi->txq.tid = tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	sta->sta.txq[tid] = &txqi->txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) void ieee80211_txq_purge(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			 struct txq_info *txqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	struct fq *fq = &local->fq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	struct fq_tin *tin = &txqi->tin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	spin_lock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	fq_tin_reset(fq, tin, fq_skb_free_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	spin_unlock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	list_del_init(&txqi->schedule_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
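/*
 * Keep the fq parameters and the wiphy-level txq_* knobs in sync: a
 * non-zero wiphy value overrides the fq default, otherwise the current
 * fq default is exported back through the wiphy fields.
 */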
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) void ieee80211_txq_set_params(struct ieee80211_local *local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	if (local->hw.wiphy->txq_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		local->fq.limit = local->hw.wiphy->txq_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		local->hw.wiphy->txq_limit = local->fq.limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	if (local->hw.wiphy->txq_memory_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		local->fq.memory_limit = local->hw.wiphy->txq_memory_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		local->hw.wiphy->txq_memory_limit = local->fq.memory_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	if (local->hw.wiphy->txq_quantum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		local->fq.quantum = local->hw.wiphy->txq_quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		local->hw.wiphy->txq_quantum = local->fq.quantum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
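/*
 * Allocate the fq/CoDel state backing the intermediate TX queues.
 * Only used by drivers that implement the wake_tx_queue op; for all
 * others this is a no-op.
 */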
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) int ieee80211_txq_setup_flows(struct ieee80211_local *local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	struct fq *fq = &local->fq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	bool supp_vht = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	enum nl80211_band band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	if (!local->ops->wake_tx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	ret = fq_init(fq, 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	 * If the hardware doesn't support VHT, it is safe to limit the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	 * queue size. 4 Mbytes is 64 max-size aggregates in 802.11n.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		struct ieee80211_supported_band *sband;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		sband = local->hw.wiphy->bands[band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		if (!sband)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		supp_vht = supp_vht || sband->vht_cap.vht_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	if (!supp_vht)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		fq->memory_limit = 4 << 20; /* 4 Mbytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	codel_params_init(&local->cparams);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	local->cparams.interval = MS2TIME(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	local->cparams.target = MS2TIME(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	local->cparams.ecn = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 			       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	if (!local->cvars) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		spin_lock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		fq_reset(fq, fq_skb_free_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		spin_unlock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	for (i = 0; i < fq->flows_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		codel_vars_init(&local->cvars[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	ieee80211_txq_set_params(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	struct fq *fq = &local->fq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	if (!local->ops->wake_tx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	kfree(local->cvars);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	local->cvars = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	spin_lock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	fq_reset(fq, fq_skb_free_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	spin_unlock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
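/*
 * Put the frame on its intermediate TXQ and wake the driver, if the
 * driver uses the wake_tx_queue op. Returns false (frame not consumed)
 * when there is no suitable TXQ and the caller has to transmit the
 * frame directly.
 */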
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) static bool ieee80211_queue_skb(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 				struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 				struct sta_info *sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 				struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	struct ieee80211_vif *vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	struct txq_info *txqi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	if (!local->ops->wake_tx_queue ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	    sdata->vif.type == NL80211_IFTYPE_MONITOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		sdata = container_of(sdata->bss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 				     struct ieee80211_sub_if_data, u.ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	vif = &sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	txqi = ieee80211_get_txq(local, vif, sta, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	if (!txqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	ieee80211_txq_enqueue(local, txqi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	schedule_and_wake_txq(local, txqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
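/*
 * Hand each frame in @skbs to the driver. If the target hardware queue
 * is stopped (or still has pending frames), the remaining frames are
 * spliced onto local->pending[q] and false is returned; off-channel
 * frames are dropped instead of being queued in that case.
 */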
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static bool ieee80211_tx_frags(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			       struct ieee80211_vif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 			       struct sta_info *sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 			       struct sk_buff_head *skbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 			       bool txpending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	struct ieee80211_tx_control control = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	struct sk_buff *skb, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	skb_queue_walk_safe(skbs, skb, tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		int q = info->hw_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		if (WARN_ON_ONCE(q >= local->hw.queues)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 			__skb_unlink(skb, skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 			ieee80211_free_txskb(&local->hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		if (local->queue_stop_reasons[q] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 			if (unlikely(info->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 				if (local->queue_stop_reasons[q] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 				    ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 					/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 					 * Drop off-channel frames if queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 					 * are stopped for any reason other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 					 * than off-channel operation. Never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 					 * queue them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 					 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 					spin_unlock_irqrestore(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 						&local->queue_stop_reason_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 						flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 					ieee80211_purge_tx_queue(&local->hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 								 skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 					return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 				 * Since queue is stopped, queue up frames for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 				 * later transmission from the tx-pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 				 * tasklet when the queue is woken again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 				if (txpending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 					skb_queue_splice_init(skbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 							      &local->pending[q]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 					skb_queue_splice_tail_init(skbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 								   &local->pending[q]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 				spin_unlock_irqrestore(&local->queue_stop_reason_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 						       flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 				return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		info->control.vif = vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		control.sta = sta ? &sta->sta : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		__skb_unlink(skb, skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		drv_tx(local, &control, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)  * Returns false if the frame couldn't be transmitted but was queued instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) static bool __ieee80211_tx(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			   struct sk_buff_head *skbs, int led_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			   struct sta_info *sta, bool txpending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	struct ieee80211_tx_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	struct ieee80211_sub_if_data *sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	struct ieee80211_vif *vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	bool result = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	__le16 fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	if (WARN_ON(skb_queue_empty(skbs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	skb = skb_peek(skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	sdata = vif_to_sdata(info->control.vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	if (sta && !sta->uploaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		sta = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	switch (sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	case NL80211_IFTYPE_MONITOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			vif = &sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		sdata = rcu_dereference(local->monitor_sdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		if (sdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			vif = &sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 			info->hw_queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 				vif->hw_queue[skb_get_queue_mapping(skb)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 			ieee80211_purge_tx_queue(&local->hw, skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 			vif = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		sdata = container_of(sdata->bss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 				     struct ieee80211_sub_if_data, u.ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		vif = &sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	result = ieee80211_tx_frags(local, vif, sta, skbs, txpending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	ieee80211_tpt_led_trig_tx(local, fc, led_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	WARN_ON_ONCE(!skb_queue_empty(skbs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)  * Invoke TX handlers, return 0 on success and non-zero if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)  * frame was dropped or queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)  * The handlers are split into an early and late part. The latter is everything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)  * that can be sensitive to reordering, and will be deferred to after packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)  * are dequeued from the intermediate queues (when they are enabled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	ieee80211_tx_result res = TX_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) #define CALL_TXH(txh) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	do {				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		res = txh(tx);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		if (res != TX_CONTINUE)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 			goto txh_done;	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	CALL_TXH(ieee80211_tx_h_dynamic_ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	CALL_TXH(ieee80211_tx_h_check_assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	CALL_TXH(ieee80211_tx_h_ps_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	CALL_TXH(ieee80211_tx_h_select_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		CALL_TXH(ieee80211_tx_h_rate_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)  txh_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	if (unlikely(res == TX_DROP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		I802_DEBUG_INC(tx->local->tx_handlers_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		if (tx->skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 			ieee80211_free_txskb(&tx->local->hw, tx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 			ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	} else if (unlikely(res == TX_QUEUED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		I802_DEBUG_INC(tx->local->tx_handlers_queued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)  * Late handlers can be called while the sta lock is held. Handlers that can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)  * cause packets to be generated will cause deadlock!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	ieee80211_tx_result res = TX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		__skb_queue_tail(&tx->skbs, tx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		tx->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		goto txh_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	CALL_TXH(ieee80211_tx_h_michael_mic_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	CALL_TXH(ieee80211_tx_h_sequence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	CALL_TXH(ieee80211_tx_h_fragment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	/* handlers after fragment must be aware of tx info fragmentation! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	CALL_TXH(ieee80211_tx_h_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	CALL_TXH(ieee80211_tx_h_encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		CALL_TXH(ieee80211_tx_h_calculate_duration);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) #undef CALL_TXH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)  txh_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	if (unlikely(res == TX_DROP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		I802_DEBUG_INC(tx->local->tx_handlers_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		if (tx->skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 			ieee80211_free_txskb(&tx->local->hw, tx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 			ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	} else if (unlikely(res == TX_QUEUED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		I802_DEBUG_INC(tx->local->tx_handlers_queued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	int r = invoke_tx_handlers_early(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	return invoke_tx_handlers_late(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 			      struct ieee80211_vif *vif, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 			      int band, struct ieee80211_sta **sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	struct ieee80211_tx_data tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	struct sk_buff *skb2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	info->band = band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	info->control.vif = vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	if (invoke_tx_handlers(&tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	if (sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		if (tx.sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 			*sta = &tx.sta->sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 			*sta = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	/* this function isn't suitable for fragmented data frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	skb2 = __skb_dequeue(&tx.skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		ieee80211_free_txskb(hw, skb2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		ieee80211_purge_tx_queue(hw, &tx.skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)  * Returns false if the frame couldn't be transmitted but was queued instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 			 struct sta_info *sta, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 			 bool txpending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	struct ieee80211_tx_data tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	ieee80211_tx_result res_prepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	bool result = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	int led_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	if (unlikely(skb->len < 10)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	/* initialises tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	led_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	if (unlikely(res_prepare == TX_DROP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		ieee80211_free_txskb(&local->hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	} else if (unlikely(res_prepare == TX_QUEUED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	/* set up hw_queue value early */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	    !ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		info->hw_queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	if (invoke_tx_handlers_early(&tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	if (!invoke_tx_handlers_late(&tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		result = __ieee80211_tx(local, &tx.skbs, led_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 					tx.sta, txpending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /* device xmit handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) enum ieee80211_encrypt {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	ENCRYPT_NO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	ENCRYPT_MGMT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	ENCRYPT_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
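/*
 * Make sure the skb has enough headroom for the extra 802.11/crypto
 * headers and, for encrypted frames, enough tailroom for the MIC/ICV,
 * reallocating the skb head if necessary.
 */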
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 				struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 				int head_need,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 				enum ieee80211_encrypt encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	bool enc_tailroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	int tail_need = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	enc_tailroom = encrypt == ENCRYPT_MGMT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		       (encrypt == ENCRYPT_DATA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 			sdata->crypto_tx_tailroom_needed_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	if (enc_tailroom) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		tail_need = IEEE80211_ENCRYPT_TAILROOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		tail_need -= skb_tailroom(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		tail_need = max_t(int, tail_need, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	if (skb_cloned(skb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	    (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	     !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	else if (head_need || tail_need)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		I802_DEBUG_INC(local->tx_expand_skb_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		wiphy_debug(local->hw.wiphy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 			    "failed to reallocate TX buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
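/*
 * Final TX step for frames that already carry an 802.11 header: grow
 * the headroom if needed, resolve the mesh next hop for unicast mesh
 * data (which may queue the frame), set the QoS header and hand the
 * frame to ieee80211_tx().
 */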
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		    struct sta_info *sta, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	int headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	enum ieee80211_encrypt encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		encrypt = ENCRYPT_NO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	else if (ieee80211_is_mgmt(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		encrypt = ENCRYPT_MGMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		encrypt = ENCRYPT_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	headroom = local->tx_headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	if (encrypt != ENCRYPT_NO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		headroom += sdata->encrypt_headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	headroom -= skb_headroom(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	headroom = max_t(int, 0, headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		ieee80211_free_txskb(&local->hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	/* reload after potential resize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	info->control.vif = &sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		if (ieee80211_is_data(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		    is_unicast_ether_addr(hdr->addr1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 			if (mesh_nexthop_resolve(sdata, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 				return; /* skb queued: don't free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 			ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	ieee80211_set_qos_hdr(sdata, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	ieee80211_tx(sdata, sta, skb, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	struct ieee80211_radiotap_header *rthdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		(struct ieee80211_radiotap_header *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	/* check that we at least have the fixed radiotap header part */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		return false; /* too short to be possibly valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	/* is it a header version we can trust to find length from? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	if (unlikely(rthdr->it_version))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		return false; /* only version 0 is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	/* does the skb contain enough to deliver on the alleged length? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		return false; /* skb too short for claimed rt header extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
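/*
 * Parse the radiotap header in front of an injected frame and translate
 * the recognised fields (flags, TX flags, legacy/MCS/VHT rate info) into
 * tx_info flags and rate-control settings. Returns false if the header
 * is malformed.
 */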
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 				 struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	struct ieee80211_radiotap_iterator iterator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	struct ieee80211_radiotap_header *rthdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		(struct ieee80211_radiotap_header *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 						   NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	u16 txflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	u16 rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	bool rate_found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	u8 rate_retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	u16 rate_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	u8 mcs_known, mcs_flags, mcs_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	u16 vht_known;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	u8 vht_mcs = 0, vht_nss = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	if (!ieee80211_validate_radiotap_len(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		       IEEE80211_TX_CTL_DONTFRAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	 * for every radiotap entry that is present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	 * entries are present, or -EINVAL on error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	while (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		ret = ieee80211_radiotap_iterator_next(&iterator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		/* see if this argument is something we can use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		switch (iterator.this_arg_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		 * You must take care when dereferencing iterator.this_arg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		 * for multibyte types... the pointer is not aligned.  Use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		 * get_unaligned((type *)iterator.this_arg) to dereference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		 * iterator.this_arg for type "type" safely on all arches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		case IEEE80211_RADIOTAP_FLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 				 * this indicates that the skb we have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 				 * handed has the 32-bit FCS CRC at the end...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 				 * we should react to that by snipping it off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 				 * because it will be recomputed and added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 				 * on transmission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 				if (skb->len < (iterator._max_length + FCS_LEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 					return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 				skb_trim(skb, skb->len - FCS_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 				info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 				info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		case IEEE80211_RADIOTAP_TX_FLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 			txflags = get_unaligned_le16(iterator.this_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 			if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 				info->flags |= IEEE80211_TX_CTL_NO_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 			if (txflags & IEEE80211_RADIOTAP_F_TX_NOSEQNO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 				info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		case IEEE80211_RADIOTAP_RATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 			rate = *iterator.this_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 			rate_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 			rate_found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		case IEEE80211_RADIOTAP_DATA_RETRIES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 			rate_retries = *iterator.this_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		case IEEE80211_RADIOTAP_MCS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 			mcs_known = iterator.this_arg[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 			mcs_flags = iterator.this_arg[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 			if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 			rate_found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 			rate = iterator.this_arg[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 			rate_flags = IEEE80211_TX_RC_MCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 			    mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 				rate_flags |= IEEE80211_TX_RC_SHORT_GI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 			mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 			    mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 				rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		case IEEE80211_RADIOTAP_VHT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 			vht_known = get_unaligned_le16(iterator.this_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 			rate_found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 			rate_flags = IEEE80211_TX_RC_VHT_MCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 			if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 			    (iterator.this_arg[2] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 			     IEEE80211_RADIOTAP_VHT_FLAG_SGI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 				rate_flags |= IEEE80211_TX_RC_SHORT_GI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 			if (vht_known &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 			    IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 				if (iterator.this_arg[3] == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 					rate_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 						IEEE80211_TX_RC_40_MHZ_WIDTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 				else if (iterator.this_arg[3] == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 					rate_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 						IEEE80211_TX_RC_80_MHZ_WIDTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 				else if (iterator.this_arg[3] == 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 					rate_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 						IEEE80211_TX_RC_160_MHZ_WIDTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 			vht_mcs = iterator.this_arg[4] >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 			if (vht_mcs > 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 				vht_mcs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 			vht_nss = iterator.this_arg[4] & 0xF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 			if (!vht_nss || vht_nss > 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 				vht_nss = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		 * Please update the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		 * Documentation/networking/mac80211-injection.rst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		 * when parsing new fields here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	if (rate_found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		struct ieee80211_supported_band *sband =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 			local->hw.wiphy->bands[info->band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 			info->control.rates[i].idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 			info->control.rates[i].flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 			info->control.rates[i].count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		if (rate_flags & IEEE80211_TX_RC_MCS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 			info->control.rates[0].idx = rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 			ieee80211_rate_set_vht(info->control.rates, vht_mcs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 					       vht_nss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		} else if (sband) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			for (i = 0; i < sband->n_bitrates; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 				if (rate * 5 != sband->bitrates[i].bitrate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 				info->control.rates[0].idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		if (info->control.rates[0].idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 			info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		info->control.rates[0].flags = rate_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		info->control.rates[0].count = min_t(u8, rate_retries + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 						     local->hw.max_rate_tries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 
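/*
 * start_xmit handler for monitor (injection) interfaces: frames arrive
 * with a radiotap header followed by a complete 802.11 header. Validate
 * the radiotap length, set up skb->protocol/priority and find the
 * interface to transmit on.
 */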
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 					 struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	struct ieee80211_chanctx_conf *chanctx_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	struct ieee80211_sub_if_data *tmp_sdata, *sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	struct cfg80211_chan_def *chandef;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	u16 len_rthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	int hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	memset(info, 0, sizeof(*info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		      IEEE80211_TX_CTL_INJECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	/* Sanity-check the length of the radiotap header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	if (!ieee80211_validate_radiotap_len(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	/* we now know there is a radiotap header with a length we can use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	len_rthdr = ieee80211_get_radiotap_len(skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	 * fix up the pointers accounting for the radiotap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	 * header still being in there.  We are being given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	 * a precooked IEEE80211 header so no need for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	 * normal processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	skb_set_mac_header(skb, len_rthdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	 * these are just fixed to the end of the rt area since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	 * don't have any better information and at this point, nobody cares
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	skb_set_network_header(skb, len_rthdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	skb_set_transport_header(skb, len_rthdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	if (skb->len < len_rthdr + 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	if (skb->len < len_rthdr + hdrlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	 * Initialize skb->protocol if the injected frame is a data frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	 * carrying a rfc1042 header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	if (ieee80211_is_data(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	    skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		u8 *payload = (u8 *)hdr + hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		if (ether_addr_equal(payload, rfc1042_header))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 			skb->protocol = cpu_to_be16((payload[6] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 						    payload[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	 * Initialize skb->priority for QoS frames. This is put in the TID field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	 * of the frame before passing it to the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	if (ieee80211_is_data_qos(hdr->frame_control)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		u8 *p = ieee80211_get_qos_ctl(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	 * Outgoing injected frames with a local transmitter address are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	 * handled as though they were non-injected frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	 * This code here isn't entirely correct; the local MAC address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	 * isn't always enough to find the interface to use; for proper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	 * VLAN/WDS support we will need a different mechanism (which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	 * likely isn't going to be monitor interfaces).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	 * This is necessary, for example, for old hostapd versions that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	 * don't use nl80211-based management TX/RX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		if (!ieee80211_sdata_running(tmp_sdata))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		    tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		    tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 			sdata = tmp_sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	if (!chanctx_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		tmp_sdata = rcu_dereference(local->monitor_sdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		if (tmp_sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 			chanctx_conf =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 				rcu_dereference(tmp_sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 
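	/* pick the channel definition: prefer the channel context found
	 * above, otherwise fall back to the single operating channel when
	 * the driver doesn't use channel contexts
	 */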
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	if (chanctx_conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		chandef = &chanctx_conf->def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	else if (!local->use_chanctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		chandef = &local->_oper_chandef;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		goto fail_rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	 * Frame injection is not allowed if beaconing is not allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	 * or if we need radar detection. Beaconing is usually not allowed when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	 * Passive scan is also used in world regulatory domains where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	 * your country is not known, and as such it should be treated as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	 * NO TX unless the channel is explicitly allowed, in which case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	 * your current regulatory domain would not have the passive scan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	 * flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	 * Since AP mode uses monitor interfaces to inject/TX management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	 * frames we can make AP mode the exception to this rule once it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	 * supports radar detection as its implementation can deal with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	 * radar detection by itself. We can do that later by adding a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	 * monitor flag to interfaces used for AP support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 				     sdata->vif.type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		goto fail_rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	info->band = chandef->chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	 * Process the radiotap header. This will now take into account the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	 * selected chandef above to accurately set injection rates and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	 * retransmissions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	if (!ieee80211_parse_tx_radiotap(skb, dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		goto fail_rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	/* remove the injection radiotap header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	skb_pull(skb, len_rthdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	ieee80211_xmit(sdata, NULL, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) fail_rcu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	return NETDEV_TX_OK; /* meaning, we dealt with the skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) {
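	/* TDLS frames are data frames carrying the TDLS ethertype and the
	 * TDLS payload type byte immediately after the Ethernet header
	 */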
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	u16 ethertype = (skb->data[12] << 8) | skb->data[13];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	return ethertype == ETH_P_TDLS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	       skb->len > 14 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	       skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 			    struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 			    struct sta_info **sta_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	switch (sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 		sta = rcu_dereference(sdata->u.vlan.sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		if (sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 			*sta_out = sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 		} else if (sdata->wdev.use_4addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 			return -ENOLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	case NL80211_IFTYPE_AP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	case NL80211_IFTYPE_OCB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	case NL80211_IFTYPE_ADHOC:
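		/* multicast frames are transmitted without a station entry;
		 * signal that with an ERR_PTR which callers treat as "no sta"
		 */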
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		if (is_multicast_ether_addr(skb->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 			*sta_out = ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 		sta = sta_info_get_bss(sdata, skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	case NL80211_IFTYPE_WDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 		sta = sta_info_get(sdata, sdata->u.wds.remote_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) #ifdef CONFIG_MAC80211_MESH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	case NL80211_IFTYPE_MESH_POINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		/* determined much later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		*sta_out = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	case NL80211_IFTYPE_STATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 			sta = sta_info_get(sdata, skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 			if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 				if (test_sta_flag(sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 						  WLAN_STA_TDLS_PEER_AUTH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 					*sta_out = sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 					return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 				 * TDLS link during setup - throw out frames to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 				 * peer. Allow TDLS-setup frames to unauthorized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 				 * peers for the special case of a link teardown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 				 * after a TDLS sta is removed due to being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 				 * unreachable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 				if (!ieee80211_is_tdls_setup(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 					return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		sta = sta_info_get(sdata, sdata->u.mgd.bssid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 		if (!sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 			return -ENOLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	*sta_out = sta ?: ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) static u16 ieee80211_store_ack_skb(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 				   struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 				   u32 *info_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 				   u64 *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	struct sk_buff *ack_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	u16 info_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
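	/* keep a clone of the frame so that TX status can be reported
	 * back to the requester later
	 */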
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	if (skb->sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 		ack_skb = skb_clone_sk(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		ack_skb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	if (ack_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 		unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 		int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 
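		/* allocate an id for the clone; the range is bounded so the
		 * id fits into the ack_frame_id field of the TX info
		 */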
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 		spin_lock_irqsave(&local->ack_status_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		id = idr_alloc(&local->ack_status_frames, ack_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 			       1, 0x2000, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 		spin_unlock_irqrestore(&local->ack_status_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 		if (id >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 			info_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 			*info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 			if (cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 				*cookie = ieee80211_mgmt_tx_cookie(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 				IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 			kfree_skb(ack_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	return info_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)  * ieee80211_build_hdr - build 802.11 header in the given frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)  * @sdata: virtual interface to build the header for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)  * @skb: the skb to build the header in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)  * @info_flags: skb flags to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)  * @sta: the station pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)  * @ctrl_flags: info control flags to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)  * @cookie: cookie pointer to fill (if not %NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)  * This function takes the skb with 802.3 header and reformats the header to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)  * the appropriate IEEE 802.11 header based on which interface the packet is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)  * being transmitted on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)  * Note that this function also takes care of the TX status request and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)  * potential unsharing of the SKB - this needs to be interleaved with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)  * header building.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)  * This function requires the RCU read-side lock to be held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)  * Returns: the (possibly reallocated) skb or an ERR_PTR() code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 					   struct sk_buff *skb, u32 info_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 					   struct sta_info *sta, u32 ctrl_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 					   u64 *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	struct ieee80211_tx_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	int head_need;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	u16 ethertype, hdrlen,  meshhdrlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	__le16 fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	struct ieee80211_hdr hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	struct ieee80211s_hdr mesh_hdr __maybe_unused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	const u8 *encaps_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	int encaps_len, skip_header_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	bool wme_sta = false, authorized = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	bool tdls_peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	bool multicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	u16 info_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	struct ieee80211_chanctx_conf *chanctx_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	struct ieee80211_sub_if_data *ap_sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	enum nl80211_band band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 
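	/* an ERR_PTR from the RA station lookup simply means "no station" */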
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	if (IS_ERR(sta))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		sta = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) #ifdef CONFIG_MAC80211_DEBUGFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	if (local->force_tx_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	/* convert Ethernet header to proper 802.11 header (based on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	 * operation mode) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	ethertype = (skb->data[12] << 8) | skb->data[13];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	switch (sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		if (sdata->wdev.use_4addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 			/* RA TA DA SA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 			memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 			memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 			memcpy(hdr.addr3, skb->data, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 			memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 			hdrlen = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 			authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 			wme_sta = sta->sta.wme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 		ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 					u.ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		if (!chanctx_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 			ret = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 			goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		band = chanctx_conf->def.chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 		if (sdata->wdev.use_4addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	case NL80211_IFTYPE_AP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 		if (sdata->vif.type == NL80211_IFTYPE_AP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 			chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 		if (!chanctx_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 			ret = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 			goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		/* DA BSSID SA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 		memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 		memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 		hdrlen = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 		band = chanctx_conf->def.chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	case NL80211_IFTYPE_WDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		/* RA TA DA SA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 		memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		memcpy(hdr.addr3, skb->data, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 		memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		hdrlen = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		 * This is the exception! WDS style interfaces are prohibited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 		 * when channel contexts are in use, so this must be valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		band = local->hw.conf.chandef.chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) #ifdef CONFIG_MAC80211_MESH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	case NL80211_IFTYPE_MESH_POINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 		if (!is_multicast_ether_addr(skb->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 			struct sta_info *next_hop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 			bool mpp_lookup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 			mpath = mesh_path_lookup(sdata, skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 			if (mpath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 				mpp_lookup = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 				next_hop = rcu_dereference(mpath->next_hop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 				if (!next_hop ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 				    !(mpath->flags & (MESH_PATH_ACTIVE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 						      MESH_PATH_RESOLVING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 					mpp_lookup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 			if (mpp_lookup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 				mppath = mpp_path_lookup(sdata, skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 				if (mppath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 					mppath->exp_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 			if (mppath && mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 				mesh_path_del(sdata, mpath->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 		 * Use address extension if it is a packet from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 		 * another interface or if we know the destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 		 * is being proxied by a portal (i.e. portal address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 		 * differs from proxied address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 		if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 		    !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 					skb->data, skb->data + ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 			meshhdrlen = ieee80211_new_mesh_header(sdata, &mesh_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 							       NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 			/* DS -> MBSS (802.11-2012 13.11.3.3).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 			 * For unicast with unknown forwarding information,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 			 * the destination might be inside the MBSS or, if that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 			 * fails, forwarded to another mesh gate. In either case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 			 * resolution will be handled in ieee80211_xmit(), so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 			 * leave the original DA. This also works for mcast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 			const u8 *mesh_da = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 			if (mppath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 				mesh_da = mppath->mpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 			else if (mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 				mesh_da = mpath->dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 					mesh_da, sdata->vif.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 			if (is_multicast_ether_addr(mesh_da))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 				/* DA TA mSA AE:SA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 				meshhdrlen = ieee80211_new_mesh_header(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 						sdata, &mesh_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 						skb->data + ETH_ALEN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 				/* RA TA mDA mSA AE:DA SA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 				meshhdrlen = ieee80211_new_mesh_header(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 						sdata, &mesh_hdr, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 						skb->data + ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		if (!chanctx_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 			ret = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 			goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		band = chanctx_conf->def.chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		/* For injected frames, fill RA right away as nexthop lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		 * will be skipped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		if ((ctrl_flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		    is_zero_ether_addr(hdr.addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 			memcpy(hdr.addr1, skb->data, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	case NL80211_IFTYPE_STATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		/* we already did checks when looking up the RA STA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 		if (tdls_peer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 			/* DA SA BSSID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 			memcpy(hdr.addr1, skb->data, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 			memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 			memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 			hdrlen = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		}  else if (sdata->u.mgd.use_4addr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 			    cpu_to_be16(ethertype) != sdata->control_port_protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 					  IEEE80211_FCTL_TODS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 			/* RA TA DA SA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 			memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 			memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 			memcpy(hdr.addr3, skb->data, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 			memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 			hdrlen = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 			fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 			/* BSSID SA DA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 			memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 			memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 			memcpy(hdr.addr3, skb->data, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 			hdrlen = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 		if (!chanctx_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 			ret = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 			goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 		band = chanctx_conf->def.chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	case NL80211_IFTYPE_OCB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 		/* DA SA BSSID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		eth_broadcast_addr(hdr.addr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		hdrlen = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		if (!chanctx_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 			ret = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 			goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 		band = chanctx_conf->def.chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	case NL80211_IFTYPE_ADHOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		/* DA SA BSSID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 		memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 		hdrlen = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		if (!chanctx_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 			ret = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 			goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 		band = chanctx_conf->def.chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	multicast = is_multicast_ether_addr(hdr.addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 	/* sta is always NULL for mesh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	if (sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 		wme_sta = sta->sta.wme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		/* For mesh, the use of the QoS header is mandatory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		wme_sta = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	/* receiver does QoS (which also means we do), so use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	if (wme_sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		hdrlen += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	 * Drop unicast frames to unauthorised stations unless they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	 * EAPOL frames from the local station.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 		     (sdata->vif.type != NL80211_IFTYPE_OCB) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 		     !multicast && !authorized &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 		     (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 		      !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 		net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 				    sdata->name, hdr.addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 		I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 		ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 
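	/* for unicast frames whose sender requested TX status (via the
	 * socket flag or the control flag), clone and register the skb so
	 * the status can be reported back later
	 */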
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	if (unlikely(!multicast && ((skb->sk &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		     ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		info_id = ieee80211_store_ack_skb(local, skb, &info_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 						  cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	 * If the skb is shared we need to obtain our own copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	if (skb_shared(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 		struct sk_buff *tmp_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 		/* can't happen -- skb is a clone if info_id != 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 		WARN_ON(info_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 		skb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 		kfree_skb(tmp_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 			goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	hdr.frame_control = fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	hdr.duration_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	hdr.seq_ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	skip_header_bytes = ETH_HLEN;
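	/* for RFC 1042 / bridge-tunnel encapsulation the original
	 * ethertype stays in place and becomes the last two bytes of the
	 * SNAP header, so strip two bytes less of the Ethernet header
	 */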
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		encaps_data = bridge_tunnel_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 		encaps_len = sizeof(bridge_tunnel_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 		skip_header_bytes -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	} else if (ethertype >= ETH_P_802_3_MIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 		encaps_data = rfc1042_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		encaps_len = sizeof(rfc1042_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 		skip_header_bytes -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 		encaps_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 		encaps_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	skb_pull(skb, skip_header_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	 * We modify the skb header, so we need our own (writable) copy of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	 * it. The head_need value computed above only covers the space we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	 * need right away, not the extra headroom (encryption and driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	 * headroom) that is only needed later. If we can, we avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	 * reallocating now and leave that until the frame reaches the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	 * driver (if it ever does).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	 * If we do have to reallocate now, however, we make the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	 * big enough for everything we may ever need.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	if (head_need > 0 || skb_cloned(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 		head_need += sdata->encrypt_headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 		head_need += local->tx_headroom;
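		/* head_need may be negative when there is already enough
		 * headroom but the skb is cloned; clamp it so the resize
		 * below only unshares the buffer
		 */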
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 		head_need = max_t(int, 0, head_need);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 		if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 			ieee80211_free_txskb(&local->hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 			skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 			return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	if (encaps_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) #ifdef CONFIG_MAC80211_MESH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	if (meshhdrlen > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	if (ieee80211_is_data_qos(fc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 		__le16 *qos_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 		qos_control = skb_push(skb, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 		memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 		 * Maybe we could actually set some fields here; for now, just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 		 * initialise to zero to indicate no special operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		*qos_control = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	memset(info, 0, sizeof(*info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	info->flags = info_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	info->ack_frame_id = info_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	info->band = band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	info->control.flags = ctrl_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)  free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)  * fast-xmit overview
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)  * The core idea of this fast-xmit is to remove per-packet checks by checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)  * them out of band. ieee80211_check_fast_xmit() implements the out-of-band
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)  * checks that are needed to get the sta->fast_tx pointer assigned, after which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)  * much less work can be done per packet. For example, fragmentation must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)  * disabled or the fast_tx pointer will not be set. All the conditions are seen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)  * in the code here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)  * Once assigned, the fast_tx data structure also caches the per-packet 802.11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)  * header and other data to aid packet processing in ieee80211_xmit_fast().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)  * The most difficult part of this is that when any of these assumptions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)  * change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)  * ieee80211_check_fast_xmit() or friends) is required to reset the data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)  * since the per-packet code no longer checks the conditions. This is reflected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)  * by the calls to these functions throughout the rest of the code, and must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)  * maintained if any of the TX path checks change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) void ieee80211_check_fast_xmit(struct sta_info *sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	struct ieee80211_local *local = sta->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 	struct ieee80211_sub_if_data *sdata = sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	struct ieee80211_hdr *hdr = (void *)build.hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	struct ieee80211_chanctx_conf *chanctx_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 	__le16 fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 	if (!ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	/* Locking here protects both the pointer itself, and against concurrent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	 * invocations winning data access races to, e.g., the key pointer that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	 * is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	 * Without it, the invocation of this function right after the key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	 * pointer changes wouldn't be sufficient, as another CPU could access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	 * the pointer, then stall, and then do the cache update after the CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	 * that invalidated the key.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	 * With the locking, such scenarios cannot happen as the check for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	 * key and the fast-tx assignment are done atomically, so the CPU that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	 * modifies the key will either wait or the other one will see the key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	 * cleared/changed already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	spin_lock_bh(&sta->lock);
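	/* if mac80211 has to manage dynamic powersave itself (device PS
	 * without dynamic PS), the regular TX path's powersave handling is
	 * required, so don't build a fast-xmit cache for station interfaces
	 */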
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	    sdata->vif.type == NL80211_IFTYPE_STATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	    test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	    test_sta_flag(sta, WLAN_STA_PS_DELIVER) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	    test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	if (sdata->noack_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	/* fast-xmit doesn't handle fragmentation at all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 	if (local->hw.wiphy->frag_threshold != (u32)-1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 	    !ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	if (!chanctx_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	build.band = chanctx_conf->def.chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	switch (sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	case NL80211_IFTYPE_ADHOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 		/* DA SA BSSID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 		build.da_offs = offsetof(struct ieee80211_hdr, addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 		build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 		memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 		build.hdr_len = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	case NL80211_IFTYPE_STATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 		if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 			/* DA SA BSSID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 			build.da_offs = offsetof(struct ieee80211_hdr, addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 			build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 			memcpy(hdr->addr3, sdata->u.mgd.bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 			build.hdr_len = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 		if (sdata->u.mgd.use_4addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 			/* non-regular ethertype cannot use the fastpath */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 					  IEEE80211_FCTL_TODS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 			/* RA TA DA SA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 			memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 			memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 			build.da_offs = offsetof(struct ieee80211_hdr, addr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 			build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 			build.hdr_len = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 		/* BSSID SA DA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 		memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 		build.da_offs = offsetof(struct ieee80211_hdr, addr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 		build.hdr_len = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 	case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 		if (sdata->wdev.use_4addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 					  IEEE80211_FCTL_TODS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 			/* RA TA DA SA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 			memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 			memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 			build.da_offs = offsetof(struct ieee80211_hdr, addr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 			build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 			build.hdr_len = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	case NL80211_IFTYPE_AP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 		/* DA BSSID SA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 		build.da_offs = offsetof(struct ieee80211_hdr, addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 		build.sa_offs = offsetof(struct ieee80211_hdr, addr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 		build.hdr_len = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 		/* not handled on fast-xmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	if (sta->sta.wme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 		build.hdr_len += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	/* We store the key here, so there's no point in using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	 * rcu_dereference(); that's fine because the code that changes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 	 * pointers will call this function again after doing so. For a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	 * CPU that would be enough; for multiple CPUs, see the comment above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 	if (!build.key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 		build.key = rcu_access_pointer(sdata->default_unicast_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	if (build.key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 		bool gen_iv, iv_spc, mmic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 		gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 		iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 		mmic = build.key->conf.flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 			(IEEE80211_KEY_FLAG_GENERATE_MMIC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 			 IEEE80211_KEY_FLAG_PUT_MIC_SPACE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 		/* don't handle software crypto */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 		if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		/* Key is being removed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 		if (build.key->flags & KEY_FLAG_TAINTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 		switch (build.key->conf.cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 		case WLAN_CIPHER_SUITE_CCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 		case WLAN_CIPHER_SUITE_CCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 			if (gen_iv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 				build.pn_offs = build.hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 			if (gen_iv || iv_spc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 				build.hdr_len += IEEE80211_CCMP_HDR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 		case WLAN_CIPHER_SUITE_GCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 		case WLAN_CIPHER_SUITE_GCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 			if (gen_iv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 				build.pn_offs = build.hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 			if (gen_iv || iv_spc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 				build.hdr_len += IEEE80211_GCMP_HDR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 		case WLAN_CIPHER_SUITE_TKIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 			/* cannot handle MMIC or IV generation in fast-xmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 			if (mmic || gen_iv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 			if (iv_spc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 				build.hdr_len += IEEE80211_TKIP_IV_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 		case WLAN_CIPHER_SUITE_WEP40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 		case WLAN_CIPHER_SUITE_WEP104:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 			/* cannot handle IV generation in fast-xmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 			if (gen_iv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 			if (iv_spc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 				build.hdr_len += IEEE80211_WEP_IV_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 		case WLAN_CIPHER_SUITE_AES_CMAC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 		case WLAN_CIPHER_SUITE_BIP_CMAC_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 			WARN(1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 			     "management cipher suite 0x%x enabled for data\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 			     build.key->conf.cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 			/* we don't know how to generate IVs for this at all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 			if (WARN_ON(gen_iv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 			/* pure hardware keys are OK, of course */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 			if (!(build.key->flags & KEY_FLAG_CIPHER_SCHEME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 			/* cipher scheme might require space allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 			if (iv_spc &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 			    build.key->conf.iv_len > IEEE80211_FAST_XMIT_MAX_IV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 			if (iv_spc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 				build.hdr_len += build.key->conf.iv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 		fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	hdr->frame_control = fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	memcpy(build.hdr + build.hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	       rfc1042_header, sizeof(rfc1042_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	build.hdr_len += sizeof(rfc1042_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	/* if the kmemdup fails, continue w/o fast_tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	if (!fast_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	/* we might have raced against another call to this function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 	old = rcu_dereference_protected(sta->fast_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 					lockdep_is_held(&sta->lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 	rcu_assign_pointer(sta->fast_tx, fast_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 	if (old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 		kfree_rcu(old, rcu_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 	spin_unlock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 
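/* Re-evaluate the fast-xmit cache for every station known to this hardware. */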
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) void ieee80211_check_fast_xmit_all(struct ieee80211_local *local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 	list_for_each_entry_rcu(sta, &local->sta_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 		ieee80211_check_fast_xmit(sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 
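/* Re-evaluate the fast-xmit cache for all stations that belong to the given
 * interface or share its BSS (e.g. stations on an AP's AP_VLAN interfaces).
 */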
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		if (sdata != sta->sdata &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 		    (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 		ieee80211_check_fast_xmit(sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 
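/* Drop the station's cached fast-xmit data; the old entry is freed via
 * kfree_rcu() so concurrent transmitters may finish using it first.
 */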
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) void ieee80211_clear_fast_xmit(struct sta_info *sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	struct ieee80211_fast_tx *fast_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 	spin_lock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 	fast_tx = rcu_dereference_protected(sta->fast_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 					    lockdep_is_held(&sta->lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	RCU_INIT_POINTER(sta->fast_tx, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	spin_unlock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 	if (fast_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 		kfree_rcu(fast_tx, rcu_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 
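/* Make sure the skb has at least @headroom bytes of headroom, expanding the
 * head if necessary; returns false if the reallocation fails.
 */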
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 					struct sk_buff *skb, int headroom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	if (skb_headroom(skb) < headroom) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 		I802_DEBUG_INC(local->tx_expand_skb_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 		if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 			wiphy_debug(local->hw.wiphy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 				    "failed to reallocate TX buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 
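/* Turn an already-queued frame into the first subframe of an A-MSDU:
 * insert the inner DA/SA/length subframe header behind the 802.11 header,
 * adjust the outer addresses where required and set the A-MSDU-present bit
 * in the QoS control field.
 */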
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 					 struct ieee80211_fast_tx *fast_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 					 struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 	struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	struct ethhdr *amsdu_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 	int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	int subframe_len = skb->len - hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 	void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	u8 *qc, *h_80211_src, *h_80211_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	const u8 *bssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	if (!ieee80211_amsdu_realloc_pad(local, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 					 sizeof(*amsdu_hdr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 					 local->hw.extra_tx_headroom))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 	data = skb_push(skb, sizeof(*amsdu_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	memmove(data, data + sizeof(*amsdu_hdr), hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 	hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	amsdu_hdr = data + hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	/* h_80211_src/dst is addr* field within hdr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	h_80211_src = data + fast_tx->sa_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	h_80211_dst = data + fast_tx->da_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	amsdu_hdr->h_proto = cpu_to_be16(subframe_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	ether_addr_copy(amsdu_hdr->h_source, h_80211_src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 	ether_addr_copy(amsdu_hdr->h_dest, h_80211_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	/* according to IEEE 802.11-2012 8.3.2 table 8-19, the outer SA/DA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	 * fields need to be changed to the BSSID for A-MSDU frames depending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	 * on FromDS/ToDS values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	switch (sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	case NL80211_IFTYPE_STATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 		bssid = sdata->u.mgd.bssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 	case NL80211_IFTYPE_AP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 	case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 		bssid = sdata->vif.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 		bssid = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	if (bssid && ieee80211_has_fromds(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 		ether_addr_copy(h_80211_src, bssid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 	if (bssid && ieee80211_has_tods(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 		ether_addr_copy(h_80211_dst, bssid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 	qc = ieee80211_get_qos_ctl(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 	*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	info->control.flags |= IEEE80211_TX_CTRL_AMSDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 
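/* Try to append @skb as a further A-MSDU subframe to the tail frame of the
 * matching flow on the station's TXQ for this TID. Returns true only if the
 * skb was merged, in which case the caller must not transmit it separately.
 */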
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 				      struct sta_info *sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 				      struct ieee80211_fast_tx *fast_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 				      struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	struct fq *fq = &local->fq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 	struct fq_tin *tin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	struct fq_flow *flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 	u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	struct ieee80211_txq *txq = sta->sta.txq[tid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	struct txq_info *txqi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	struct sk_buff **frag_tail, *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 	int subframe_len = skb->len - ETH_ALEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 	u8 max_subframes = sta->sta.max_amsdu_subframes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	int max_frags = local->hw.max_tx_fragments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	int max_amsdu_len = sta->sta.max_amsdu_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	int orig_truesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	u32 flow_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	__be16 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 	void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	unsigned int orig_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 	int n = 2, nfrags, pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	u16 hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 	if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 	if (skb_is_gso(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 	if (!txq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 	txqi = to_txq_info(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	if (test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	if (sta->sta.max_rc_amsdu_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 		max_amsdu_len = min_t(int, max_amsdu_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 				      sta->sta.max_rc_amsdu_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 	if (sta->sta.max_tid_amsdu_len[tid])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 		max_amsdu_len = min_t(int, max_amsdu_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 				      sta->sta.max_tid_amsdu_len[tid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 	flow_idx = fq_flow_idx(fq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	spin_lock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 	/* TODO: Ideally aggregation should be done on dequeue to remain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	 * responsive to environment changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	tin = &txqi->tin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 	flow = fq_flow_classify(fq, tin, flow_idx, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 				fq_flow_get_default_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 	head = skb_peek_tail(&flow->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 	if (!head || skb_is_gso(head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 	orig_truesize = head->truesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 	orig_len = head->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 	if (skb->len + head->len > max_amsdu_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 	nfrags = 1 + skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 	nfrags += 1 + skb_shinfo(head)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 	frag_tail = &skb_shinfo(head)->frag_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 	while (*frag_tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 		nfrags += 1 + skb_shinfo(*frag_tail)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 		frag_tail = &(*frag_tail)->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 		n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 	if (max_subframes && n > max_subframes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	if (max_frags && nfrags > max_frags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 	if (!drv_can_aggregate_in_amsdu(local, head, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 	/* If n == 2, the "while (*frag_tail)" loop above didn't execute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	 * and frag_tail should be &skb_shinfo(head)->frag_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 	 * However, ieee80211_amsdu_prepare_head() can reallocate it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	 * Reload frag_tail to have it pointing to the correct place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 	if (n == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 		frag_tail = &skb_shinfo(head)->frag_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	 * Pad out the previous subframe to a multiple of 4 by adding the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	 * padding to the next one, that's being added. Note that head->len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	 * is the length of the full A-MSDU, but that works since each time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	 * we add a new subframe we pad out the previous one to a multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 	 * of 4 and thus it no longer matters in the next round.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	 */
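	/* e.g. if the previous subframe ends 61 bytes past the 802.11 header,
	 * pad becomes 3 so that the next subframe starts 4-byte aligned
	 */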
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 	hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 	if ((head->len - hdrlen) & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 		pad = 4 - ((head->len - hdrlen) & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 	if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 						     2 + pad))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 		goto out_recalc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 	ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 	data = skb_push(skb, ETH_ALEN + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 	memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 	data += 2 * ETH_ALEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	len = cpu_to_be16(subframe_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 	memcpy(data, &len, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 	memset(skb_push(skb, pad), 0, pad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 	head->len += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 	head->data_len += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	*frag_tail = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) out_recalc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 	fq->memory_usage += head->truesize - orig_truesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	if (head->len != orig_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 		flow->backlog += head->len - orig_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 		tin->backlog_bytes += head->len - orig_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 		fq_recalc_backlog(fq, tin, flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	spin_unlock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)  * Can be called while the sta lock is held. Anything that can cause packets to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)  * be generated will cause deadlock!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 				       struct sta_info *sta, u8 pn_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 				       struct ieee80211_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 				       struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	struct ieee80211_hdr *hdr = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	u8 tid = IEEE80211_NUM_TIDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	if (key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 		info->control.hw_key = &key->conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	ieee80211_tx_stats(skb->dev, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 		hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 		hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
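		/* the low four bits of seq_ctrl carry the fragment number,
		 * so the sequence number is stepped in units of 0x10
		 */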
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 		sdata->sequence_number += 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 	if (skb_shinfo(skb)->gso_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 		sta->tx_stats.msdu[tid] +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 			DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 		sta->tx_stats.msdu[tid]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 	/* statistics normally done by ieee80211_tx_h_stats (but that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	 * has to consider fragmentation, so is more complex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 	sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 	if (pn_offs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 		u64 pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 		u8 *crypto_hdr = skb->data + pn_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 
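		/* write the incremented PN into the CCMP/GCMP header; byte 3
		 * carries the Ext IV bit (0x20) and the key index in its top
		 * two bits
		 */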
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 		switch (key->conf.cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 		case WLAN_CIPHER_SUITE_CCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 		case WLAN_CIPHER_SUITE_CCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 		case WLAN_CIPHER_SUITE_GCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 		case WLAN_CIPHER_SUITE_GCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 			pn = atomic64_inc_return(&key->conf.tx_pn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 			crypto_hdr[0] = pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 			crypto_hdr[1] = pn >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 			crypto_hdr[3] = 0x20 | (key->conf.keyidx << 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 			crypto_hdr[4] = pn >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 			crypto_hdr[5] = pn >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 			crypto_hdr[6] = pn >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 			crypto_hdr[7] = pn >> 40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 
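/* The fast transmit path. Returns false if the frame cannot be handled here
 * and must go through the full TX handlers; returns true once the skb has
 * been consumed (queued, handed to the driver, or dropped).
 */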
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 				struct sta_info *sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 				struct ieee80211_fast_tx *fast_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 				struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	u16 ethertype = (skb->data[12] << 8) | skb->data[13];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 	int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	int hw_headroom = sdata->local->hw.extra_tx_headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 	struct ethhdr eth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 	struct ieee80211_tx_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	struct ieee80211_tx_data tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	ieee80211_tx_result r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 	struct tid_ampdu_tx *tid_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	u8 tid = IEEE80211_NUM_TIDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	/* control port protocol needs a lot of special handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	if (cpu_to_be16(ethertype) == sdata->control_port_protocol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 	/* only RFC 1042 SNAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	if (ethertype < ETH_P_802_3_MIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	/* don't handle TX status request here either */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 		tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 		if (tid_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 			if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 				return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 			if (tid_tx->timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 				tid_tx->last_tx = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 	/* after this point (skb is modified) we cannot return false */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 	if (skb_shared(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 		struct sk_buff *tmp_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 		skb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 		kfree_skb(tmp_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 		if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	    ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 	/* will not be crypto-handled beyond what we do here, so pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	 * ENCRYPT_NO to the resize so that it does not account for more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 	 * room than we already have in 'extra_head'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 	if (unlikely(ieee80211_skb_resize(sdata, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 					  max_t(int, extra_head + hw_headroom -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 						     skb_headroom(skb), 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 					  ENCRYPT_NO))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	memcpy(&eth, skb->data, ETH_HLEN - 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	hdr = skb_push(skb, extra_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 	memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 	memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	memset(info, 0, sizeof(*info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 	info->band = fast_tx->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	info->control.vif = &sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 		      IEEE80211_TX_CTL_DONTFRAG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 		      (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 	info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) #ifdef CONFIG_MAC80211_DEBUGFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 	if (local->force_tx_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 		info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 		*ieee80211_get_qos_ctl(hdr) = tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	__skb_queue_head_init(&tx.skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	tx.flags = IEEE80211_TX_UNICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	tx.local = local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	tx.sdata = sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	tx.sta = sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 	tx.key = fast_tx->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 	if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 		tx.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 		r = ieee80211_tx_h_rate_ctrl(&tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 		skb = tx.skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 		tx.skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 		if (r != TX_CONTINUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 			if (r != TX_QUEUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 				kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	if (ieee80211_queue_skb(local, sdata, sta, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 	ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 				   fast_tx->key, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 		sdata = container_of(sdata->bss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 				     struct ieee80211_sub_if_data, u.ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 	__skb_queue_tail(&tx.skbs, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	ieee80211_tx_frags(local, &sdata->vif, sta, &tx.skbs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 
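/* Driver-facing dequeue: pull the next frame off the TXQ, re-select the key,
 * run the remaining TX handlers (or the fast-xmit finish for frames queued via
 * the fast path), and charge the estimated airtime against the AQL budget when
 * the driver supports it.
 */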
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 				     struct ieee80211_txq *txq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 	struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 	struct txq_info *txqi = container_of(txq, struct txq_info, txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 	struct fq *fq = &local->fq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	struct fq_tin *tin = &txqi->tin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	struct ieee80211_tx_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 	struct ieee80211_tx_data tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	ieee80211_tx_result r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 	struct ieee80211_vif *vif = txq->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	WARN_ON_ONCE(softirq_count() == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 	if (!ieee80211_txq_airtime_check(hw, txq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) begin:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 	spin_lock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 	if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 	    test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	if (vif->txqs_stopped[txq->ac]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 		set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 	/* Make sure fragments stay together. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 	skb = __skb_dequeue(&txqi->frags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 	spin_unlock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 	hdr = (struct ieee80211_hdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 	memset(&tx, 0, sizeof(tx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 	__skb_queue_head_init(&tx.skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 	tx.local = local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 	tx.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 	tx.sdata = vif_to_sdata(info->control.vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 	if (txq->sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 		tx.sta = container_of(txq->sta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 		 * Drop unicast frames to unauthorised stations unless they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 		 * injected frames or EAPOL frames from the local station.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 		if (unlikely(!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 			     ieee80211_is_data(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 			     !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 			     tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 			     !is_multicast_ether_addr(hdr->addr1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 			     !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 			     (!(info->control.flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 				IEEE80211_TX_CTRL_PORT_CTRL_PROTO) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 			      !ether_addr_equal(tx.sdata->vif.addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 						hdr->addr2)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 			I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 			ieee80211_free_txskb(&local->hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 			goto begin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	 * The key can have been removed while the packet was queued, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 	 * need to call this here to get the current key.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 	r = ieee80211_tx_h_select_key(&tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 	if (r != TX_CONTINUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 		ieee80211_free_txskb(&local->hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 		goto begin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 	if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 		info->flags |= IEEE80211_TX_CTL_AMPDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 		goto encap_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 	if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 		struct sta_info *sta = container_of(txq->sta, struct sta_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 						    sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 		u8 pn_offs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 		if (tx.key &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 		    (tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 			pn_offs = ieee80211_hdrlen(hdr->frame_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 		ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 					   tx.key, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 		if (invoke_tx_handlers_late(&tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 			goto begin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 		skb = __skb_dequeue(&tx.skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 		if (!skb_queue_empty(&tx.skbs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 			spin_lock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 			skb_queue_splice_tail(&tx.skbs, &txqi->frags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 			spin_unlock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	if (skb_has_frag_list(skb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 	    !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 		if (skb_linearize(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 			ieee80211_free_txskb(&local->hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 			goto begin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 	switch (tx.sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 	case NL80211_IFTYPE_MONITOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 		if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 			vif = &tx.sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 		tx.sdata = rcu_dereference(local->monitor_sdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 		if (tx.sdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 			vif = &tx.sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 			info->hw_queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 				vif->hw_queue[skb_get_queue_mapping(skb)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 		} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 			ieee80211_free_txskb(&local->hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 			goto begin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 			vif = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 	case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 		tx.sdata = container_of(tx.sdata->bss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 					struct ieee80211_sub_if_data, u.ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 		vif = &tx.sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) encap_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 	IEEE80211_SKB_CB(skb)->control.vif = vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 	if (vif &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 	    wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 		bool ampdu = txq->ac != IEEE80211_AC_VO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 		u32 airtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 		airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 							     skb->len, ampdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 		if (airtime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 			airtime = ieee80211_info_set_tx_time_est(info, airtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 			ieee80211_sta_update_pending_airtime(local, tx.sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 							     txq->ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 							     airtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 							     false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 	spin_unlock_bh(&fq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) EXPORT_SYMBOL(ieee80211_tx_dequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 
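/*
 * Descriptive note (not in the original source): return the next txq to
 * pull frames from for the given access category, or NULL if none is
 * eligible.  The function walks the active_txqs list, replenishing
 * negative airtime deficits and rotating ineligible entries to the tail
 * (deficit round-robin), and stops once it wraps back around to the
 * first entry it examined in this call.
 */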
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 	struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 	struct ieee80211_txq *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 	struct txq_info *txqi = NULL, *head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 	bool found_eligible_txq = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 	spin_lock_bh(&local->active_txq_lock[ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803)  begin:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 	txqi = list_first_entry_or_null(&local->active_txqs[ac],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 					struct txq_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 					schedule_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 	if (!txqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 	if (txqi == head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 		if (!found_eligible_txq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 			found_eligible_txq = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 	if (!head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 		head = txqi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 
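	/*
	 * For station txqs, enforce airtime fairness: a station whose
	 * deficit has gone negative gets it replenished by its weight and
	 * is rotated to the tail of the list, as is any txq that currently
	 * fails the AQL (airtime queue limit) check.
	 */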
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 	if (txqi->txq.sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 		struct sta_info *sta = container_of(txqi->txq.sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 						    struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 		bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 		s64 deficit = sta->airtime[txqi->txq.ac].deficit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 		if (aql_check)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 			found_eligible_txq = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 		if (deficit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 			sta->airtime[txqi->txq.ac].deficit +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 				sta->airtime_weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 		if (deficit < 0 || !aql_check) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 			list_move_tail(&txqi->schedule_order,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 				       &local->active_txqs[txqi->txq.ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 			goto begin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 	if (txqi->schedule_round == local->schedule_round[ac])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 	list_del_init(&txqi->schedule_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 	txqi->schedule_round = local->schedule_round[ac];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 	ret = &txqi->txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 	spin_unlock_bh(&local->active_txq_lock[ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) EXPORT_SYMBOL(ieee80211_next_txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 			      struct ieee80211_txq *txq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 			      bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 	struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 	struct txq_info *txqi = to_txq_info(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 	spin_lock_bh(&local->active_txq_lock[txq->ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 	if (list_empty(&txqi->schedule_order) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 	    (force || !skb_queue_empty(&txqi->frags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 	     txqi->tin.backlog_packets)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 		/* If airtime accounting is active, always enqueue STAs at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 		 * head of the list to ensure that they only get moved to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 		 * back by the airtime DRR scheduler once they have a negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 		 * deficit. A station that already has a negative deficit will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 		 * get immediately moved to the back of the list on the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 		 * call to ieee80211_next_txq().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 		if (txqi->txq.sta && local->airtime_flags &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 		    wiphy_ext_feature_isset(local->hw.wiphy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 					    NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 			list_add(&txqi->schedule_order,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 				 &local->active_txqs[txq->ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 			list_add_tail(&txqi->schedule_order,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 				      &local->active_txqs[txq->ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 	spin_unlock_bh(&local->active_txq_lock[txq->ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) EXPORT_SYMBOL(__ieee80211_schedule_txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 
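/*
 * AQL (airtime queue limit) check: a txq may queue more data towards the
 * hardware as long as the station's pending airtime is below its low
 * limit, or below its high limit while the total pending airtime on the
 * device is still under the global threshold.  Always true when AQL is
 * not supported or for txqs without a station.
 */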
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 				 struct ieee80211_txq *txq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 	struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 	struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 	if (!txq->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 	sta = container_of(txq->sta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 	if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 	    sta->airtime[txq->ac].aql_limit_low)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 	if (atomic_read(&local->aql_total_pending_airtime) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 	    local->aql_threshold &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 	    atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 	    sta->airtime[txq->ac].aql_limit_high)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) EXPORT_SYMBOL(ieee80211_txq_airtime_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 
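/*
 * Check whether the given txq may transmit right now without violating
 * airtime fairness: txqs scheduled ahead of it are rotated to the tail
 * (replenishing negative station deficits along the way), and if this
 * txq's own deficit is still negative it is requeued and false returned.
 */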
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 				struct ieee80211_txq *txq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 	struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 	struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 	struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 	u8 ac = txq->ac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 	spin_lock_bh(&local->active_txq_lock[ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 	if (!txqi->txq.sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 	if (list_empty(&txqi->schedule_order))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 	list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 				 schedule_order) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 		if (iter == txqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 		if (!iter->txq.sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 			list_move_tail(&iter->schedule_order,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 				       &local->active_txqs[ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 		sta = container_of(iter->txq.sta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 		if (sta->airtime[ac].deficit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 			sta->airtime[ac].deficit += sta->airtime_weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 		list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 	sta = container_of(txqi->txq.sta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 	if (sta->airtime[ac].deficit >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 	sta->airtime[ac].deficit += sta->airtime_weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 	list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 	spin_unlock_bh(&local->active_txq_lock[ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 	if (!list_empty(&txqi->schedule_order))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 		list_del_init(&txqi->schedule_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	spin_unlock_bh(&local->active_txq_lock[ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) EXPORT_SYMBOL(ieee80211_txq_may_transmit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 	struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 	spin_lock_bh(&local->active_txq_lock[ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 	local->schedule_round[ac]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 	spin_unlock_bh(&local->active_txq_lock[ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) EXPORT_SYMBOL(ieee80211_txq_schedule_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 
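/*
 * Common transmit path for 802.3 frames handed down by the network stack:
 * look up the destination station, try the fast-xmit path, otherwise
 * software-segment GSO frames (or linearize and fix up partial checksums),
 * then build an 802.11 header for each resulting frame and transmit it.
 */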
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) void __ieee80211_subif_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 				  struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 				  u32 info_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 				  u32 ctrl_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 				  u64 *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 	struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 	struct sk_buff *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 	if (unlikely(skb->len < ETH_HLEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 	if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 	if (IS_ERR(sta))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 		sta = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 	if (local->ops->wake_tx_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 		u16 queue = __ieee80211_select_queue(sdata, sta, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 		skb_set_queue_mapping(skb, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 		skb_get_hash(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 	if (sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 		struct ieee80211_fast_tx *fast_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 		sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 		fast_tx = rcu_dereference(sta->fast_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 		if (fast_tx &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 		    ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 
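	/*
	 * Slow path: GSO frames are segmented in software here, since
	 * each segment gets its own 802.11 header built below.
	 */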
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 	if (skb_is_gso(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 		struct sk_buff *segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 		segs = skb_gso_segment(skb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 		if (IS_ERR(segs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 		} else if (segs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 			consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 			skb = segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 		/* we cannot process non-linear frames on this path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 		if (skb_linearize(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 			kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 		/* the frame could be fragmented, software-encrypted, and other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 		 * things so we cannot really handle checksum offload with it -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 		 * fix it up in software before we handle anything else.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 			skb_set_transport_header(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 						 skb_checksum_start_offset(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 			if (skb_checksum_help(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 				goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 	skb_list_walk_safe(skb, skb, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 		skb_mark_not_on_list(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 		if (skb->protocol == sdata->control_port_protocol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 			ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 		skb = ieee80211_build_hdr(sdata, skb, info_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 					  sta, ctrl_flags, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 		if (IS_ERR(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 			kfree_skb_list(next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 		ieee80211_tx_stats(dev, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 		ieee80211_xmit(sdata, sta, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063)  out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 
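/* Rewrite the Ethernet destination address to the given station's MAC
 * address, for multicast-to-unicast conversion. */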
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 	struct ethhdr *eth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 	err = skb_ensure_writable(skb, ETH_HLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 	if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 	eth = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 	ether_addr_copy(eth->h_dest, sta->sta.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 
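/*
 * Decide whether a multicast frame should be converted into per-station
 * unicast copies: only on AP/AP_VLAN interfaces with the per-BSS
 * multicast_to_unicast toggle enabled, and only for ARP/IP/IPv6 payloads
 * (optionally behind an 802.1Q tag).
 */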
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) static bool ieee80211_multicast_to_unicast(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 					   struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 	const struct ethhdr *eth = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 	const struct vlan_ethhdr *ethvlan = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 	__be16 ethertype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 	if (likely(!is_multicast_ether_addr(eth->h_dest)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 	switch (sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 	case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 		if (sdata->u.vlan.sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 		if (sdata->wdev.use_4addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 	case NL80211_IFTYPE_AP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 		/* check runtime toggle for this bss */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 		if (!sdata->bss->multicast_to_unicast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 	/* multicast to unicast conversion only for some payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 	ethertype = eth->h_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 	if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 		ethertype = ethvlan->h_vlan_encapsulated_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 	switch (ethertype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 	case htons(ETH_P_ARP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 	case htons(ETH_P_IP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 	case htons(ETH_P_IPV6):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 
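/*
 * Convert a multicast frame into one unicast copy per associated station
 * on this interface, skipping the sender.  If cloning or rewriting a copy
 * fails, fall back to queueing the original multicast frame instead.
 */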
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 			     struct sk_buff_head *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 	const struct ethhdr *eth = (struct ethhdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 	struct sta_info *sta, *first = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 	struct sk_buff *cloned_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 		if (sdata != sta->sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 			/* AP-VLAN mismatch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 		if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 			/* do not send back to source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 		if (!first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 			first = sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 		cloned_skb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 		if (!cloned_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 			goto multicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 		if (unlikely(ieee80211_change_da(cloned_skb, sta))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 			dev_kfree_skb(cloned_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 			goto multicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 		__skb_queue_tail(queue, cloned_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	if (likely(first)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 		if (unlikely(ieee80211_change_da(skb, first)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 			goto multicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 		__skb_queue_tail(queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 		/* no STA connected, drop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 		skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) multicast:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 	__skb_queue_purge(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 	__skb_queue_tail(queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179)  * ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180)  * @skb: packet to be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181)  * @dev: incoming interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183)  * On failure skb will be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 				       struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 	if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 		struct sk_buff_head queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 		__skb_queue_head_init(&queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 		ieee80211_convert_to_unicast(skb, dev, &queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 		while ((skb = __skb_dequeue(&queue)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 			__ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 		__ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 
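/*
 * Hand an 802.3 (encap-offloaded) frame to the driver.  Frames may first
 * be put on an intermediate txq when the driver uses wake_tx_queue; if the
 * target hardware queue is stopped, or frames are already pending for it,
 * the frame goes on the pending queue instead and false is returned.
 */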
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 			      struct sk_buff *skb, int led_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 			      struct sta_info *sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 			      bool txpending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 	struct ieee80211_tx_control control = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 	struct ieee80211_sta *pubsta = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 	int q = info->hw_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 	if (ieee80211_queue_skb(local, sdata, sta, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 	if (local->queue_stop_reasons[q] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 	    (!txpending && !skb_queue_empty(&local->pending[q]))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 		if (txpending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 			skb_queue_head(&local->pending[q], skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 			skb_queue_tail(&local->pending[q], skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 	if (sta && sta->uploaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 		pubsta = &sta->sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 	control.sta = pubsta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 	drv_tx(local, &control, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 
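/*
 * Transmit one 802.3 frame via the encap-offload path: select the queue,
 * mark the frame as part of an A-MPDU when an aggregation session is
 * operational (falling back to the normal path while a session is still
 * being set up), update tx statistics and pass it to ieee80211_tx_8023().
 */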
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 				struct net_device *dev, struct sta_info *sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 				struct ieee80211_key *key, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 	struct tid_ampdu_tx *tid_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) 	u8 tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 	if (local->ops->wake_tx_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 		u16 queue = __ieee80211_select_queue(sdata, sta, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 		skb_set_queue_mapping(skb, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 		skb_get_hash(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 	if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 	    test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 	memset(info, 0, sizeof(*info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 	if (tid_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 		if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 			/* fall back to non-offload slow path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 			__ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 		info->flags |= IEEE80211_TX_CTL_AMPDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 		if (tid_tx->timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 			tid_tx->last_tx = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 	if (unlikely(skb->sk &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 		info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 							     &info->flags, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 	ieee80211_tx_stats(dev, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 	sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 	sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 		sdata = container_of(sdata->bss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 				     struct ieee80211_sub_if_data, u.ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 	info->flags |= IEEE80211_TX_CTL_HW_80211_ENCAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 	info->control.vif = &sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 	if (key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 		info->control.hw_key = &key->conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 	ieee80211_tx_8023(sdata, skb, skb->len, sta, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 
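/*
 * ndo_start_xmit for interfaces with 802.3 encap offload enabled.  Falls
 * back to the normal 802.11 path for frames that cannot be offloaded:
 * unknown or unauthorized stations, control port (EAPOL) frames, and keys
 * that are not uploaded to hardware or that use TKIP.
 */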
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 					    struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 	struct ieee80211_key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 	struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 	if (unlikely(skb->len < ETH_HLEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 	if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 	if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 	    !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 	    sdata->control_port_protocol == ehdr->h_proto))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 		goto skip_offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 	if (!key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 		key = rcu_dereference(sdata->default_unicast_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 	if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 		    key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 		goto skip_offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 	ieee80211_8023_xmit(sdata, dev, sta, key, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) skip_offload:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 	ieee80211_subif_start_xmit(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 
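/*
 * Build an 802.11 data frame from an 802.3 frame without transmitting it,
 * including key selection, so the result can be used as a template by
 * callers that need a fully built frame.
 */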
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) struct sk_buff *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 			      struct sk_buff *skb, u32 info_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) 	struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 	struct ieee80211_tx_data tx = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) 		.local = sdata->local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 		.sdata = sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 	struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 	if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 		skb = ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) 	skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 	if (IS_ERR(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) 	hdr = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 	tx.sta = sta_info_get(sdata, hdr->addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 	tx.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) 	if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391)  * ieee80211_clear_tx_pending may not be called in a context where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392)  * it is possible that packets could come in again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) void ieee80211_clear_tx_pending(struct ieee80211_local *local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 	for (i = 0; i < local->hw.queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 		while ((skb = skb_dequeue(&local->pending[i])) != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 			ieee80211_free_txskb(&local->hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406)  * Returns false if the frame couldn't be transmitted but was queued instead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407)  * which in this case means re-queued -- take as an indication to stop sending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408)  * more pending frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 				     struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 	struct ieee80211_sub_if_data *sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 	struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) 	struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 	bool result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) 	struct ieee80211_chanctx_conf *chanctx_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 	sdata = vif_to_sdata(info->control.vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 	if (info->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 		if (unlikely(!chanctx_conf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) 		info->band = chanctx_conf->def.chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 		result = ieee80211_tx(sdata, NULL, skb, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) 	} else if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 		if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 		if (IS_ERR(sta) || (sta && !sta->uploaded))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 			sta = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 		result = ieee80211_tx_8023(sdata, skb, skb->len, sta, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 		struct sk_buff_head skbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 		__skb_queue_head_init(&skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 		__skb_queue_tail(&skbs, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 		hdr = (struct ieee80211_hdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 		sta = sta_info_get(sdata, hdr->addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 		result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456)  * Transmit all pending packets. Called from tasklet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) void ieee80211_tx_pending(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) 	struct ieee80211_local *local = (struct ieee80211_local *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) 	bool txok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 	for (i = 0; i < local->hw.queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 		 * If queue is stopped by something other than due to pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 		 * frames, or we have no pending frames, proceed to next queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) 		if (local->queue_stop_reasons[i] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 		    skb_queue_empty(&local->pending[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) 		while (!skb_queue_empty(&local->pending[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) 			struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) 			if (WARN_ON(!info->control.vif)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 				ieee80211_free_txskb(&local->hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) 			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) 						flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) 			txok = ieee80211_tx_pending_skb(local, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) 			spin_lock_irqsave(&local->queue_stop_reason_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) 					  flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) 			if (!txok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) 		if (skb_queue_empty(&local->pending[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) 			ieee80211_propagate_queue_wake(local, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) /* functions for drivers to get certain frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) 
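/*
 * Append the TIM element to a beacon: DTIM count and period, the
 * multicast/broadcast indication in the bitmap control octet, and a
 * partial virtual bitmap trimmed to the octet range (N1..N2) that
 * actually contains set bits.
 */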
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 				       struct ps_data *ps, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 				       bool is_template)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) 	u8 *pos, *tim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) 	int aid0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) 	int i, have_bits = 0, n1, n2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) 	/* Generate bitmap for TIM only if there are any STAs in power save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) 	 * mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) 	if (atomic_read(&ps->num_sta_ps) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) 		/* in the hope that this is faster than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) 		 * checking byte-for-byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) 		have_bits = !bitmap_empty((unsigned long *)ps->tim,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) 					  IEEE80211_MAX_AID+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) 	if (!is_template) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) 		if (ps->dtim_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) 			ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) 			ps->dtim_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) 	tim = pos = skb_put(skb, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) 	*pos++ = WLAN_EID_TIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) 	*pos++ = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) 	*pos++ = ps->dtim_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) 	*pos++ = sdata->vif.bss_conf.dtim_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) 	if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) 		aid0 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) 	ps->dtim_bc_mc = aid0 == 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) 	if (have_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) 		/* Find largest even number N1 so that bits numbered 1 through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) 		 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) 		 * (N2 + 1) x 8 through 2007 are 0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) 		n1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) 		for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) 			if (ps->tim[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) 				n1 = i & 0xfe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) 		n2 = n1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) 		for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) 			if (ps->tim[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 				n2 = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) 		/* Bitmap control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) 		*pos++ = n1 | aid0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) 		/* Part Virt Bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) 		skb_put(skb, n2 - n1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) 		memcpy(pos, ps->tim + n1, n2 - n1 + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) 		tim[1] = n2 - n1 + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) 		*pos++ = aid0; /* Bitmap control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) 		*pos++ = 0; /* Part Virt Bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 				    struct ps_data *ps, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) 				    bool is_template)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) 	 * Not very nice, but we want to allow the driver to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 	 * ieee80211_beacon_get() as a response to the set_tim()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 	 * callback. That, however, is already invoked under the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 	 * sta_lock to guarantee consistent and race-free update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 	 * of the tim bitmap in mac80211 and the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 	if (local->tim_in_locked_section) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 		__ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 		spin_lock_bh(&local->tim_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 		__ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) 		spin_unlock_bh(&local->tim_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) 					struct beacon_data *beacon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 	struct probe_resp *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) 	u8 *beacon_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 	size_t beacon_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) 	u8 count = beacon->cntdwn_current_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 	switch (sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) 	case NL80211_IFTYPE_AP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) 		beacon_data = beacon->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) 		beacon_data_len = beacon->tail_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) 	case NL80211_IFTYPE_ADHOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) 		beacon_data = beacon->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) 		beacon_data_len = beacon->head_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) 	case NL80211_IFTYPE_MESH_POINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 		beacon_data = beacon->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) 		beacon_data_len = beacon->head_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) 
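	/* Patch the current countdown value into every configured counter
	 * offset in the beacon and, for AP interfaces, into the probe
	 * response template as well.
	 */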
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) 	for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) 		resp = rcu_dereference(sdata->u.ap.probe_resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) 		if (beacon->cntdwn_counter_offsets[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) 			if (WARN_ON_ONCE(beacon->cntdwn_counter_offsets[i] >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) 					 beacon_data_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) 				rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) 			beacon_data[beacon->cntdwn_counter_offsets[i]] = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) 		if (sdata->vif.type == NL80211_IFTYPE_AP && resp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) 			resp->data[resp->cntdwn_counter_offsets[i]] = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) 	beacon->cntdwn_current_counter--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) 	/* the counter should never reach 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) 	WARN_ON_ONCE(!beacon->cntdwn_current_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) 	return beacon->cntdwn_current_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) 	struct beacon_data *beacon = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) 	u8 count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) 	if (sdata->vif.type == NL80211_IFTYPE_AP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) 		beacon = rcu_dereference(sdata->u.ap.beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) 	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) 		beacon = rcu_dereference(sdata->u.ibss.presp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) 	else if (ieee80211_vif_is_mesh(&sdata->vif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) 		beacon = rcu_dereference(sdata->u.mesh.beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) 	if (!beacon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) 	count = __ieee80211_beacon_update_cntdwn(beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) EXPORT_SYMBOL(ieee80211_beacon_update_cntdwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) 	struct beacon_data *beacon = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) 	if (sdata->vif.type == NL80211_IFTYPE_AP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) 		beacon = rcu_dereference(sdata->u.ap.beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) 	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) 		beacon = rcu_dereference(sdata->u.ibss.presp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) 	else if (ieee80211_vif_is_mesh(&sdata->vif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) 		beacon = rcu_dereference(sdata->u.mesh.beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) 	if (!beacon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) 	if (counter < beacon->cntdwn_current_counter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) 		beacon->cntdwn_current_counter = counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) EXPORT_SYMBOL(ieee80211_beacon_set_cntdwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) 	struct beacon_data *beacon = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) 	u8 *beacon_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) 	size_t beacon_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) 	int ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) 	if (!ieee80211_sdata_running(sdata))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) 	if (vif->type == NL80211_IFTYPE_AP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) 		struct ieee80211_if_ap *ap = &sdata->u.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) 		beacon = rcu_dereference(ap->beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) 		if (WARN_ON(!beacon || !beacon->tail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) 		beacon_data = beacon->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) 		beacon_data_len = beacon->tail_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) 	} else if (vif->type == NL80211_IFTYPE_ADHOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) 		struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) 		beacon = rcu_dereference(ifibss->presp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) 		if (!beacon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) 		beacon_data = beacon->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) 		beacon_data_len = beacon->head_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) 	} else if (vif->type == NL80211_IFTYPE_MESH_POINT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) 		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) 		beacon = rcu_dereference(ifmsh->beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) 		if (!beacon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) 		beacon_data = beacon->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) 		beacon_data_len = beacon->head_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) 
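	/* An offset of zero means no countdown counter element is present. */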
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) 	if (!beacon->cntdwn_counter_offsets[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) 	if (WARN_ON_ONCE(beacon->cntdwn_counter_offsets[0] > beacon_data_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) 
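	/* The countdown is considered complete once the counter transmitted
	 * in the beacon reaches 1.
	 */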
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) 	if (beacon_data[beacon->cntdwn_counter_offsets[0]] == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) 		ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) EXPORT_SYMBOL(ieee80211_beacon_cntdwn_is_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) static int ieee80211_beacon_protect(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) 				    struct ieee80211_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) 				    struct ieee80211_sub_if_data *sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) 	ieee80211_tx_result res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) 	struct ieee80211_tx_data tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) 	struct sk_buff *check_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) 
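	/* Build a minimal tx_data so the regular encrypt handler can apply
	 * beacon protection with the default beacon key, if one is configured.
	 */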
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) 	memset(&tx, 0, sizeof(tx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) 	tx.key = rcu_dereference(sdata->default_beacon_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) 	if (!tx.key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) 	tx.local = local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) 	tx.sdata = sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) 	__skb_queue_head_init(&tx.skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) 	__skb_queue_tail(&tx.skbs, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) 	res = ieee80211_tx_h_encrypt(&tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) 	check_skb = __skb_dequeue(&tx.skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) 	/* we may crash after this, but it'd be a bug in crypto */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) 	WARN_ON(check_skb != skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) 	if (WARN_ON_ONCE(res != TX_CONTINUE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) static struct sk_buff *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) __ieee80211_beacon_get(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) 		       struct ieee80211_vif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) 		       struct ieee80211_mutable_offsets *offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) 		       bool is_template)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) 	struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) 	struct beacon_data *beacon = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) 	struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) 	struct ieee80211_tx_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) 	struct ieee80211_sub_if_data *sdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) 	enum nl80211_band band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) 	struct ieee80211_tx_rate_control txrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) 	struct ieee80211_chanctx_conf *chanctx_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) 	int csa_off_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) 	sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) 	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) 	if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) 	if (offs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) 		memset(offs, 0, sizeof(*offs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) 		struct ieee80211_if_ap *ap = &sdata->u.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) 		beacon = rcu_dereference(ap->beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) 		if (beacon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) 			if (beacon->cntdwn_counter_offsets[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) 				if (!is_template)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) 					ieee80211_beacon_update_cntdwn(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) 				ieee80211_set_beacon_cntdwn(sdata, beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) 			 * headroom, head length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) 			 * tail length and maximum TIM length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) 			skb = dev_alloc_skb(local->tx_headroom +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) 					    beacon->head_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) 					    beacon->tail_len + 256 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) 					    local->hw.extra_beacon_tailroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) 			if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) 			skb_reserve(skb, local->tx_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) 			skb_put_data(skb, beacon->head, beacon->head_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) 			ieee80211_beacon_add_tim(sdata, &ap->ps, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) 						 is_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) 			if (offs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) 				offs->tim_offset = beacon->head_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) 				offs->tim_length = skb->len - beacon->head_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) 				/* for AP the csa offsets are from tail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) 				csa_off_base = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) 			if (beacon->tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) 				skb_put_data(skb, beacon->tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) 					     beacon->tail_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) 			if (ieee80211_beacon_protect(skb, local, sdata) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) 	} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) 		struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) 		struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) 		beacon = rcu_dereference(ifibss->presp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) 		if (!beacon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) 		if (beacon->cntdwn_counter_offsets[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) 			if (!is_template)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) 				__ieee80211_beacon_update_cntdwn(beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) 			ieee80211_set_beacon_cntdwn(sdata, beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) 		skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) 				    local->hw.extra_beacon_tailroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) 		if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) 		skb_reserve(skb, local->tx_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) 		skb_put_data(skb, beacon->head, beacon->head_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) 		hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) 		hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) 						 IEEE80211_STYPE_BEACON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) 	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) 		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) 		beacon = rcu_dereference(ifmsh->beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) 		if (!beacon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) 		if (beacon->cntdwn_counter_offsets[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) 			if (!is_template)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) 				/* TODO: For mesh csa_counter is in TU, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) 				 * decrementing it by one isn't correct, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) 				 * for now we leave it consistent with overall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) 				 * mac80211's behavior.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) 				__ieee80211_beacon_update_cntdwn(beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) 			ieee80211_set_beacon_cntdwn(sdata, beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) 		if (ifmsh->sync_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) 			ifmsh->sync_ops->adjust_tsf(sdata, beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) 		skb = dev_alloc_skb(local->tx_headroom +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) 				    beacon->head_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) 				    256 + /* TIM IE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) 				    beacon->tail_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) 				    local->hw.extra_beacon_tailroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) 		if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) 		skb_reserve(skb, local->tx_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) 		skb_put_data(skb, beacon->head, beacon->head_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) 		ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) 		if (offs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) 			offs->tim_offset = beacon->head_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) 			offs->tim_length = skb->len - beacon->head_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) 		skb_put_data(skb, beacon->tail, beacon->tail_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) 	/* CSA offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) 	if (offs && beacon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) 		for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) 			u16 csa_off = beacon->cntdwn_counter_offsets[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) 			if (!csa_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) 			offs->cntdwn_counter_offs[i] = csa_off_base + csa_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) 	band = chanctx_conf->def.chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) 	info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) 	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) 	info->flags |= IEEE80211_TX_CTL_NO_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) 	info->band = band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) 	memset(&txrc, 0, sizeof(txrc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) 	txrc.hw = hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) 	txrc.sband = local->hw.wiphy->bands[band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) 	txrc.bss_conf = &sdata->vif.bss_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) 	txrc.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) 	txrc.reported_rate.idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) 	if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) 		txrc.rate_idx_mask = sdata->beacon_rateidx_mask[band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) 		txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) 	txrc.bss = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) 	rate_control_get_rate(sdata, NULL, &txrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) 	info->control.vif = vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) 	info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) 			IEEE80211_TX_CTL_ASSIGN_SEQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) 			IEEE80211_TX_CTL_FIRST_FRAGMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) struct sk_buff *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) ieee80211_beacon_get_template(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) 			      struct ieee80211_vif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) 			      struct ieee80211_mutable_offsets *offs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) 	return __ieee80211_beacon_get(hw, vif, offs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) EXPORT_SYMBOL(ieee80211_beacon_get_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) 					 struct ieee80211_vif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) 					 u16 *tim_offset, u16 *tim_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) 	struct ieee80211_mutable_offsets offs = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) 	struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) 	struct sk_buff *copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) 	struct ieee80211_supported_band *sband;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) 	int shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) 	if (!bcn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) 		return bcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) 	if (tim_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) 		*tim_offset = offs.tim_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) 	if (tim_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) 		*tim_length = offs.tim_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) 
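	/* Hand a copy to monitor interfaces only when the driver does not
	 * report beacon TX status and at least one monitor interface exists.
	 */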
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) 	if (ieee80211_hw_check(hw, BEACON_TX_STATUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) 	    !hw_to_local(hw)->monitors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) 		return bcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) 	/* send a copy to monitor interfaces */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) 	copy = skb_copy(bcn, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) 	if (!copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) 		return bcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) 	shift = ieee80211_vif_get_shift(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) 	sband = ieee80211_get_sband(vif_to_sdata(vif));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) 	if (!sband)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) 		return bcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) 	ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) 			     NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) 	return bcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) EXPORT_SYMBOL(ieee80211_beacon_get_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) 					struct ieee80211_vif *vif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) 	struct ieee80211_if_ap *ap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) 	struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) 	struct probe_resp *presp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) 	struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) 	if (sdata->vif.type != NL80211_IFTYPE_AP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) 	ap = &sdata->u.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) 	presp = rcu_dereference(ap->probe_resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) 	if (!presp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) 	skb = dev_alloc_skb(presp->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) 	skb_put_data(skb, presp->data, presp->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) 	hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) 	memset(hdr->addr1, 0, sizeof(hdr->addr1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) EXPORT_SYMBOL(ieee80211_proberesp_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) 						  struct ieee80211_vif *vif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) 	struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) 	struct fils_discovery_data *tmpl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) 	if (sdata->vif.type != NL80211_IFTYPE_AP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) 	tmpl = rcu_dereference(sdata->u.ap.fils_discovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) 	if (!tmpl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) 	skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) 		skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) 		skb_put_data(skb, tmpl->data, tmpl->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) EXPORT_SYMBOL(ieee80211_get_fils_discovery_tmpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) struct sk_buff *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) 					  struct ieee80211_vif *vif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) 	struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) 	struct unsol_bcast_probe_resp_data *tmpl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) 	if (sdata->vif.type != NL80211_IFTYPE_AP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) 	tmpl = rcu_dereference(sdata->u.ap.unsol_bcast_probe_resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) 	if (!tmpl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) 	skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) 		skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) 		skb_put_data(skb, tmpl->data, tmpl->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) EXPORT_SYMBOL(ieee80211_get_unsol_bcast_probe_resp_tmpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) 				     struct ieee80211_vif *vif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) 	struct ieee80211_sub_if_data *sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) 	struct ieee80211_if_managed *ifmgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) 	struct ieee80211_pspoll *pspoll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) 	struct ieee80211_local *local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) 	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) 	sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) 	ifmgd = &sdata->u.mgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) 	local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) 	skb_reserve(skb, local->hw.extra_tx_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) 	pspoll = skb_put_zero(skb, sizeof(*pspoll));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) 	pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) 					    IEEE80211_STYPE_PSPOLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) 	pspoll->aid = cpu_to_le16(sdata->vif.bss_conf.aid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) 	/* aid in PS-Poll has its two MSBs each set to 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) 	pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) 	memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) 	memcpy(pspoll->ta, vif->addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) EXPORT_SYMBOL(ieee80211_pspoll_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) 				       struct ieee80211_vif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) 				       bool qos_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) 	struct ieee80211_hdr_3addr *nullfunc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) 	struct ieee80211_sub_if_data *sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) 	struct ieee80211_if_managed *ifmgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) 	struct ieee80211_local *local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) 	bool qos = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) 	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) 	sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) 	ifmgd = &sdata->u.mgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) 	local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) 	if (qos_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) 		struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) 		sta = sta_info_get(sdata, ifmgd->bssid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) 		qos = sta && sta->sta.wme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) 	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) 			    sizeof(*nullfunc) + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) 	skb_reserve(skb, local->hw.extra_tx_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) 	nullfunc = skb_put_zero(skb, sizeof(*nullfunc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) 	nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) 					      IEEE80211_STYPE_NULLFUNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) 					      IEEE80211_FCTL_TODS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) 	if (qos) {
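		/* QoS Control field carrying TID 7 (voice), matching the
		 * priority and queue mapping set below.
		 */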
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) 		__le16 qoshdr = cpu_to_le16(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) 		BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) 			      IEEE80211_STYPE_NULLFUNC) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) 			     IEEE80211_STYPE_QOS_NULLFUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) 		nullfunc->frame_control |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) 			cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) 		skb->priority = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) 		skb_set_queue_mapping(skb, IEEE80211_AC_VO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) 		skb_put_data(skb, &qoshdr, sizeof(qoshdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) 	memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) 	memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) 	memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) EXPORT_SYMBOL(ieee80211_nullfunc_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) 				       const u8 *src_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) 				       const u8 *ssid, size_t ssid_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) 				       size_t tailroom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) 	struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) 	struct ieee80211_hdr_3addr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) 	size_t ie_ssid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) 	u8 *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) 	ie_ssid_len = 2 + ssid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) 			    ie_ssid_len + tailroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) 	skb_reserve(skb, local->hw.extra_tx_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) 	hdr = skb_put_zero(skb, sizeof(*hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) 					 IEEE80211_STYPE_PROBE_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) 	eth_broadcast_addr(hdr->addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) 	memcpy(hdr->addr2, src_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) 	eth_broadcast_addr(hdr->addr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) 
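	/* Append the SSID element: ID, length, then the SSID itself
	 * (a zero-length SSID makes this a wildcard probe request).
	 */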
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) 	pos = skb_put(skb, ie_ssid_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) 	*pos++ = WLAN_EID_SSID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) 	*pos++ = ssid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) 	if (ssid_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) 		memcpy(pos, ssid, ssid_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) 	pos += ssid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) EXPORT_SYMBOL(ieee80211_probereq_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) 		       const void *frame, size_t frame_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) 		       const struct ieee80211_tx_info *frame_txctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) 		       struct ieee80211_rts *rts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) 	const struct ieee80211_hdr *hdr = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) 	rts->frame_control =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) 	    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) 	rts->duration = ieee80211_rts_duration(hw, vif, frame_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) 					       frame_txctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) 	memcpy(rts->ra, hdr->addr1, sizeof(rts->ra));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) 	memcpy(rts->ta, hdr->addr2, sizeof(rts->ta));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) EXPORT_SYMBOL(ieee80211_rts_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) 			     const void *frame, size_t frame_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) 			     const struct ieee80211_tx_info *frame_txctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) 			     struct ieee80211_cts *cts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) 	const struct ieee80211_hdr *hdr = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) 	cts->frame_control =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) 	    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) 	cts->duration = ieee80211_ctstoself_duration(hw, vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) 						     frame_len, frame_txctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) 	memcpy(cts->ra, hdr->addr1, sizeof(cts->ra));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) EXPORT_SYMBOL(ieee80211_ctstoself_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) struct sk_buff *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) 			  struct ieee80211_vif *vif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) 	struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) 	struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) 	struct ieee80211_tx_data tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) 	struct ieee80211_sub_if_data *sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) 	struct ps_data *ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) 	struct ieee80211_tx_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) 	struct ieee80211_chanctx_conf *chanctx_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) 	sdata = vif_to_sdata(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) 	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) 	if (!chanctx_conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) 		struct beacon_data *beacon =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) 				rcu_dereference(sdata->u.ap.beacon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) 		if (!beacon || !beacon->head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) 		ps = &sdata->u.ap.ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) 	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) 		ps = &sdata->u.mesh.ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) 	if (ps->dtim_count != 0 || !ps->dtim_bc_mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) 		goto out; /* send buffered bc/mc only after DTIM beacon */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) 
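	/* Keep dequeuing buffered frames until one passes TX preparation;
	 * frames that fail are freed and the next one is tried.
	 */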
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) 		skb = skb_dequeue(&ps->bc_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) 		if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) 		local->total_ps_buffered--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) 		if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) 			struct ieee80211_hdr *hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) 				(struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) 			/* more buffered multicast/broadcast frames ==> set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) 			 * MoreData flag in IEEE 802.11 header to inform PS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) 			 * STAs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) 			hdr->frame_control |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) 				cpu_to_le16(IEEE80211_FCTL_MOREDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) 
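		/* The frame may have been buffered for an AP_VLAN interface;
		 * re-resolve the sdata from the netdev it was queued on.
		 */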
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) 		if (sdata->vif.type == NL80211_IFTYPE_AP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) 			sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) 		if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) 		ieee80211_free_txskb(hw, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) 	info = IEEE80211_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) 	tx.flags |= IEEE80211_TX_PS_BUFFERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) 	info->band = chanctx_conf->def.chan->band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) 	if (invoke_tx_handlers(&tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) 		skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) EXPORT_SYMBOL(ieee80211_get_buffered_bc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) 	struct ieee80211_sub_if_data *sdata = sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) 	u32 queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) 	lockdep_assert_held(&local->sta_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) 	/* only some cases are supported right now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) 	switch (sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) 	case NL80211_IFTYPE_STATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) 	case NL80211_IFTYPE_AP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) 	case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) 	if (WARN_ON(tid >= IEEE80211_NUM_UPS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) 	if (sta->reserved_tid == tid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) 	if (sta->reserved_tid != IEEE80211_TID_UNRESERVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) 		sdata_err(sdata, "TID reservation already active\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) 		ret = -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) 	ieee80211_stop_vif_queues(sdata->local, sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) 				  IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) 	synchronize_net();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) 	/* Tear down BA sessions so we stop aggregating on this TID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) 	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) 		set_sta_flag(sta, WLAN_STA_BLOCK_BA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) 		__ieee80211_stop_tx_ba_session(sta, tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) 					       AGG_STOP_LOCAL_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) 	queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) 	__ieee80211_flush_queues(local, sdata, queues, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) 	sta->reserved_tid = tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) 	ieee80211_wake_vif_queues(local, sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) 				  IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) 	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) 		clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) EXPORT_SYMBOL(ieee80211_reserve_tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) 
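/*
 * ieee80211_unreserve_tid() - release a reservation made with
 * ieee80211_reserve_tid().  Warns and bails out if the interface type is
 * unsupported or if the given TID is not the one currently reserved.
 */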
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) void ieee80211_unreserve_tid(struct ieee80211_sta *pubsta, u8 tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) 	struct ieee80211_sub_if_data *sdata = sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) 	lockdep_assert_held(&sdata->local->sta_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) 	/* only some cases are supported right now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) 	switch (sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) 	case NL80211_IFTYPE_STATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) 	case NL80211_IFTYPE_AP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) 	case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) 	if (tid != sta->reserved_tid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) 		sdata_err(sdata, "TID to unreserve (%d) isn't reserved\n", tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) 	sta->reserved_tid = IEEE80211_TID_UNRESERVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) EXPORT_SYMBOL(ieee80211_unreserve_tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) 
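/*
 * __ieee80211_tx_skb_tid_band() - transmit an already-built 802.11 frame
 * on a given TID and band.  The TID is mapped to an access category for
 * the queue mapping, skb->priority and skb->dev are set up, and the frame
 * is handed to ieee80211_xmit() with bottom halves disabled to match the
 * locking expectations of the tasklet TX path (see the comment below).
 */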
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) 				 struct sk_buff *skb, int tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) 				 enum nl80211_band band)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) 	int ac = ieee80211_ac_from_tid(tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) 	skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) 	skb_set_queue_mapping(skb, ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) 	skb->priority = tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) 	skb->dev = sdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) 	 * The other path calling ieee80211_xmit is from the tasklet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) 	 * and while we can handle concurrent transmissions, the locking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) 	 * requirement is that we do not enter TX with bottom halves enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) 	local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) 	IEEE80211_SKB_CB(skb)->band = band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) 	ieee80211_xmit(sdata, NULL, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) 	local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) 
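/*
 * ieee80211_tx_control_port() - transmit a control port frame (e.g. an
 * EAPOL frame handed down through nl80211); this serves as mac80211's
 * handler for cfg80211's tx_control_port operation.  Frames are rejected
 * unless their protocol matches the connection's configured control port
 * protocol or ETH_P_PREAUTH.  An Ethernet header is constructed in front
 * of the payload and the frame is pushed through the normal data TX path,
 * optionally unencrypted and optionally with TX status reporting when a
 * cookie is requested.
 */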
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) 			      const u8 *buf, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) 			      const u8 *dest, __be16 proto, bool unencrypted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) 			      u64 *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) 	struct ethhdr *ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) 	u32 ctrl_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) 	u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) 	/* Only accept the control port protocol configured in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) 	 * CONNECT/ASSOCIATE, or Pre-Authentication (ETH_P_PREAUTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) 	if (proto != sdata->control_port_protocol &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) 	    proto != cpu_to_be16(ETH_P_PREAUTH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) 	if (proto == sdata->control_port_protocol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) 		ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) 			      IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) 	if (unencrypted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) 		flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) 	if (cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) 		ctrl_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) 	flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) 		 IEEE80211_TX_CTL_INJECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) 	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) 			    sizeof(struct ethhdr) + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) 	skb_reserve(skb, local->hw.extra_tx_headroom + sizeof(struct ethhdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) 	skb_put_data(skb, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) 	ehdr = skb_push(skb, sizeof(struct ethhdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) 	memcpy(ehdr->h_dest, dest, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) 	memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) 	ehdr->h_proto = proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) 
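	/* Hand the frame to the regular data TX path as if it had come from
	 * the stack: an Ethernet-framed skb with its mac/network headers at
	 * the start of the data.
	 */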
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) 	skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) 	skb->protocol = htons(ETH_P_802_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) 	skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) 	skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) 	/* mutex lock is only needed for incrementing the cookie counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) 	mutex_lock(&local->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) 	local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) 	__ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) 	local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) 	mutex_unlock(&local->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) 
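/*
 * ieee80211_probe_mesh_link() - transmit a frame directly to a mesh peer,
 * used by cfg80211's probe_mesh_link operation.  The skb is allocated with
 * headroom for the driver, the 802.11 header (30 bytes) and the 802.11s
 * mesh header (18 bytes), and is sent with
 * IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP so that it bypasses mesh path
 * resolution and goes straight to the addressed peer.
 */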
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) 			      const u8 *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) 	struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) 			    30 + /* header size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) 			    18); /* 11s header size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) 	skb_reserve(skb, local->hw.extra_tx_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) 	skb_put_data(skb, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) 	skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) 	skb->protocol = htons(ETH_P_802_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) 	skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) 	skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) 	local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) 	__ieee80211_subif_start_xmit(skb, skb->dev, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) 				     IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) 				     NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) 	local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) }