// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2021 Intel Corporation
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
#include "rate.h"

static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}
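
/*
 * Note (explanatory, not from the original source): the
 * u64_stats_update_begin()/end() pair above lets readers observe the
 * 64-bit rx_packets/rx_bytes counters consistently even on 32-bit
 * systems, where a 64-bit increment is not a single atomic store.
 */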

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb,
					   unsigned int present_fcs_len,
					   unsigned int rtap_space)
{
	struct ieee80211_hdr *hdr;
	unsigned int hdrlen;
	__le16 fc;

	if (present_fcs_len)
		__pskb_trim(skb, skb->len - present_fcs_len);
	__pskb_pull(skb, rtap_space);

	hdr = (void *)skb->data;
	fc = hdr->frame_control;

	/*
	 * Remove the HT-Control field (if present) on management
	 * frames after we've sent the frame to monitoring. We
	 * (currently) don't need it, and don't properly parse
	 * frames with it present, due to the assumption of a
	 * fixed management header length.
	 */
	if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc)))
		return skb;

	hdrlen = ieee80211_hdrlen(fc);
	hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);

	if (!pskb_may_pull(skb, hdrlen)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data,
		hdrlen - IEEE80211_HT_CTL_LEN);
	__pskb_pull(skb, IEEE80211_HT_CTL_LEN);

	return skb;
}
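
/*
 * Worked example: for a management frame whose header is the usual
 * 24 bytes plus the 4-byte HT-Control field (hdrlen == 28), the
 * memmove() above shifts the first 24 header bytes forward by
 * IEEE80211_HT_CTL_LEN, overwriting the HT-Control field, and the
 * __pskb_pull() then discards the four stale bytes left at the front,
 * leaving a plain 24-byte header in front of the frame body.
 */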

static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
				     unsigned int rtap_space)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;

	hdr = (void *)(skb->data + rtap_space);

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
			    RX_FLAG_FAILED_PLCP_CRC |
			    RX_FLAG_ONLY_MONITOR |
			    RX_FLAG_NO_PSDU))
		return true;

	if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
		return true;

	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return true;

	return false;
}
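
/*
 * Note: the 16-byte minimum above covers the shortest frames that are
 * not rejected by the control-frame check, e.g. a PS-Poll (frame
 * control + AID + BSSID + TA = 16 bytes); anything shorter cannot be a
 * frame the stack wants to process.
 */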

static int
ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
			     struct ieee80211_rx_status *status,
			     struct sk_buff *skb)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 8;

	/* allocate extra bitmaps */
	if (status->chains)
		len += 4 * hweight8(status->chains);
	/* vendor presence bitmap */
	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
		len += 4;

	if (ieee80211_have_rx_timestamp(status)) {
		len = ALIGN(len, 8);
		len += 8;
	}
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
		len += 1;

	/* antenna field, if we don't have per-chain info */
	if (!status->chains)
		len += 1;

	/* padding for RX_FLAGS if necessary */
	len = ALIGN(len, 2);

	if (status->encoding == RX_ENC_HT) /* HT info */
		len += 3;

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		len = ALIGN(len, 4);
		len += 8;
	}

	if (status->encoding == RX_ENC_VHT) {
		len = ALIGN(len, 2);
		len += 12;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		len = ALIGN(len, 8);
		len += 12;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
	}

	if (status->flag & RX_FLAG_NO_PSDU)
		len += 1;

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		len = ALIGN(len, 2);
		len += 4;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
	}

	if (status->chains) {
		/* antenna and antenna signal fields */
		len += 2 * hweight8(status->chains);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		struct ieee80211_vendor_radiotap *rtap;
		int vendor_data_offset = 0;

		/*
		 * The position to look at depends on the existence (or non-
		 * existence) of other elements, so take that into account...
		 */
		if (status->flag & RX_FLAG_RADIOTAP_HE)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_he);
		if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_he_mu);
		if (status->flag & RX_FLAG_RADIOTAP_LSIG)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_lsig);

		rtap = (void *)&skb->data[vendor_data_offset];

		/* alignment for fixed 6-byte vendor data header */
		len = ALIGN(len, 2);
		/* vendor data header */
		len += 6;
		if (WARN_ON(rtap->align == 0))
			rtap->align = 1;
		len = ALIGN(len, rtap->align);
		len += rtap->len + rtap->pad;
	}

	return len;
}
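
/*
 * Rough worked example: for a legacy-rate frame with a dBm signal value,
 * no per-chain info, no device/RX timestamp and no vendor data, the
 * length works out as 8 (radiotap header) + 8 (flags, rate, channel and
 * RX flags) + 1 (dBm antsignal) + 1 (antenna), aligned up to 2 bytes for
 * the RX flags field, i.e. 18 bytes in total.
 */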

static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
					 struct sk_buff *skb,
					 int rtap_space)
{
	struct {
		struct ieee80211_hdr_3addr hdr;
		u8 category;
		u8 action_code;
	} __packed __aligned(2) action;

	if (!sdata)
		return;

	BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);

	if (skb->len < rtap_space + sizeof(action) +
		       VHT_MUMIMO_GROUPS_DATA_LEN)
		return;

	if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
		return;

	skb_copy_bits(skb, rtap_space, &action, sizeof(action));

	if (!ieee80211_is_action(action.hdr.frame_control))
		return;

	if (action.category != WLAN_CATEGORY_VHT)
		return;

	if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
		return;

	if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
		return;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
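
/*
 * Note: the copied group-ID management action frame is not parsed in the
 * RX path itself; it is queued to sdata->skb_queue and picked up later by
 * the interface work item, presumably so the monitor interface can track
 * MU-MIMO group membership for the configured mu_follow address.
 */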

/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	__le32 *it_present;
	u32 it_present_val;
	u16 rx_flags = 0;
	u16 channel_flags = 0;
	int mpdulen, chain;
	unsigned long chains = status->chains;
	struct ieee80211_vendor_radiotap rtap = {};
	struct ieee80211_radiotap_he he = {};
	struct ieee80211_radiotap_he_mu he_mu = {};
	struct ieee80211_radiotap_lsig lsig = {};

	if (status->flag & RX_FLAG_RADIOTAP_HE) {
		he = *(struct ieee80211_radiotap_he *)skb->data;
		skb_pull(skb, sizeof(he));
		WARN_ON_ONCE(status->encoding != RX_ENC_HE);
	}

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
		skb_pull(skb, sizeof(he_mu));
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
		skb_pull(skb, sizeof(lsig));
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
		/* rtap.len and rtap.pad are undone immediately */
		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
	}

	mpdulen = skb->len;
	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
		mpdulen += FCS_LEN;

	rthdr = skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
	it_present = &rthdr->it_present;

	/* radiotap header, set always present flags */
	rthdr->it_len = cpu_to_le16(rtap_len);
	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);

	if (!status->chains)
		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);

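	/*
	 * Note: each chain gets its own presence word. Setting the EXT bit
	 * in the current word says another 32-bit it_present word follows,
	 * and the RADIOTAP_NAMESPACE bit restarts the standard radiotap
	 * field numbering for that next word, so every chain can carry its
	 * own antenna/antsignal pair below.
	 */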
	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		it_present_val |=
			BIT(IEEE80211_RADIOTAP_EXT) |
			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
				  BIT(IEEE80211_RADIOTAP_EXT);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = rtap.present;
	}

	put_unaligned_le32(it_present_val, it_present);

	pos = (void *)(it_present + 1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (ieee80211_have_rx_timestamp(status)) {
		/* pad to the 8-byte alignment radiotap requires for TSFT */
		while ((pos - (u8 *)rthdr) & 7)
			*pos++ = 0;
		put_unaligned_le64(
			ieee80211_calculate_rx_timestamp(local, status,
							 mpdulen, 0),
			pos);
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (!rate || status->encoding != RX_ENC_LEGACY) {
		/*
		 * Without legacy rate information we don't add the field.
		 * For HT/VHT/HE frames the MCS information goes into its
		 * own radiotap field, added below. The byte here is still
		 * needed as padding for the channel field, so initialise
		 * it to 0.
		 */
		*pos = 0;
	} else {
		int shift = 0;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		if (status->bw == RATE_INFO_BW_10)
			shift = 1;
		else if (status->bw == RATE_INFO_BW_5)
			shift = 2;
		/* bitrate is in 100 kbit/s, the radiotap rate field in 500 kbit/s units */
		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	/* TODO: frequency offset in kHz */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->bw == RATE_INFO_BW_10)
		channel_flags |= IEEE80211_CHAN_HALF;
	else if (status->bw == RATE_INFO_BW_5)
		channel_flags |= IEEE80211_CHAN_QUARTER;

	if (status->band == NL80211_BAND_5GHZ ||
	    status->band == NL80211_BAND_6GHZ)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
	else if (status->encoding != RX_ENC_LEGACY)
		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
	else if (rate)
		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
	else
		channel_flags |= IEEE80211_CHAN_2GHZ;
	put_unaligned_le16(channel_flags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	if (!status->chains) {
		/* IEEE80211_RADIOTAP_ANTENNA */
		*pos = status->antenna;
		pos++;
	}

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		*pos++ = 0;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;

	if (status->encoding == RX_ENC_HT) {
		unsigned int stbc;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		*pos++ = local->hw.radiotap_mcs_details;
		*pos = 0;
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
		if (status->bw == RATE_INFO_BW_40)
			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (status->enc_flags & RX_ENC_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
		stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
		pos++;
		*pos++ = status->rate_idx;
	}

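	/*
	 * IEEE80211_RADIOTAP_AMPDU_STATUS: the 8 bytes written below are the
	 * 32-bit A-MPDU reference number, 16 bits of flags, the delimiter
	 * CRC value and one reserved byte, matching the 8 bytes accounted
	 * for in ieee80211_rx_radiotap_hdrlen().
	 */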
	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		u16 flags = 0;

		/* ensure 4 byte alignment */
		while ((pos - (u8 *)rthdr) & 3)
			pos++;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		put_unaligned_le32(status->ampdu_reference, pos);
		pos += 4;
		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
		put_unaligned_le16(flags, pos);
		pos += 2;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			*pos++ = status->ampdu_delimiter_crc;
		else
			*pos++ = 0;
		*pos++ = 0;
	}

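	/*
	 * IEEE80211_RADIOTAP_VHT: 12 bytes in total, i.e. known (2),
	 * flags (1), bandwidth (1), mcs_nss (4), coding (1), group_id (1)
	 * and partial_aid (2), again matching the hdrlen calculation.
	 */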
	if (status->encoding == RX_ENC_VHT) {
		u16 known = local->hw.radiotap_vht_details;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
		put_unaligned_le16(known, pos);
		pos += 2;
		/* flags */
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		/* in VHT, STBC is binary */
		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
		if (status->enc_flags & RX_ENC_FLAG_BF)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
		pos++;
		/* bandwidth: radiotap VHT index (0 = 20, 1 = 40, 4 = 80, 11 = 160 MHz) */
		switch (status->bw) {
		case RATE_INFO_BW_80:
			*pos++ = 4;
			break;
		case RATE_INFO_BW_160:
			*pos++ = 11;
			break;
		case RATE_INFO_BW_40:
			*pos++ = 1;
			break;
		default:
			*pos++ = 0;
		}
		/* MCS/NSS */
		*pos = (status->rate_idx << 4) | status->nss;
		pos += 4;
		/* coding field */
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
		pos++;
		/* group ID */
		pos++;
		/* partial_aid */
		pos += 2;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		u16 accuracy = 0;
		u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;

		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);

		/* ensure 8 byte alignment */
		while ((pos - (u8 *)rthdr) & 7)
			pos++;

		put_unaligned_le64(status->device_timestamp, pos);
		pos += sizeof(u64);

		if (local->hw.radiotap_timestamp.accuracy >= 0) {
			accuracy = local->hw.radiotap_timestamp.accuracy;
			flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
		}
		put_unaligned_le16(accuracy, pos);
		pos += sizeof(u16);

		*pos++ = local->hw.radiotap_timestamp.units_pos;
		*pos++ = flags;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
#define HE_PREP(f, val)	le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)

		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
			he.data6 |= HE_PREP(DATA6_NSTS,
					    FIELD_GET(RX_ENC_FLAG_STBC_MASK,
						      status->enc_flags));
			he.data3 |= HE_PREP(DATA3_STBC, 1);
		} else {
			he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
		}

#define CHECK_GI(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
		     (int)NL80211_RATE_INFO_HE_GI_##s)

		CHECK_GI(0_8);
		CHECK_GI(1_6);
		CHECK_GI(3_2);

		he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
		he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
		he.data3 |= HE_PREP(DATA3_CODING,
				    !!(status->enc_flags & RX_ENC_FLAG_LDPC));

		he.data5 |= HE_PREP(DATA5_GI, status->he_gi);

		switch (status->bw) {
		case RATE_INFO_BW_20:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
			break;
		case RATE_INFO_BW_40:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
			break;
		case RATE_INFO_BW_80:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
			break;
		case RATE_INFO_BW_160:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
			break;
		case RATE_INFO_BW_HE_RU:
#define CHECK_RU_ALLOC(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
		     NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)

			CHECK_RU_ALLOC(26);
			CHECK_RU_ALLOC(52);
			CHECK_RU_ALLOC(106);
			CHECK_RU_ALLOC(242);
			CHECK_RU_ALLOC(484);
			CHECK_RU_ALLOC(996);
			CHECK_RU_ALLOC(2x996);

			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    status->he_ru + 4);
			break;
		default:
			WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
		}

		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
		memcpy(pos, &he, sizeof(he));
		pos += sizeof(he);
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
		memcpy(pos, &he_mu, sizeof(he_mu));
		pos += sizeof(he_mu);
	}

	if (status->flag & RX_FLAG_NO_PSDU) {
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU);
		*pos++ = status->zero_length_psdu_type;
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG);
		memcpy(pos, &lsig, sizeof(lsig));
		pos += sizeof(lsig);
	}

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		*pos++ = status->chain_signal[chain];
		*pos++ = chain;
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		/* ensure 2 byte alignment for the vendor field as required */
		if ((pos - (u8 *)rthdr) & 1)
			*pos++ = 0;
		*pos++ = rtap.oui[0];
		*pos++ = rtap.oui[1];
		*pos++ = rtap.oui[2];
		*pos++ = rtap.subns;
		put_unaligned_le16(rtap.len, pos);
		pos += 2;
		/* align the actual payload as requested */
		while ((pos - (u8 *)rthdr) & (rtap.align - 1))
			*pos++ = 0;
		/* data (and possible padding) already follows */
	}
}
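
/*
 * Note: after this function the monitor skb starts, roughly, with the
 * fixed 8-byte radiotap header (whose last four bytes are the first
 * it_present word), any additional it_present words, the data fields in
 * presence-bit order with their alignment padding, any vendor data, and
 * only then the original 802.11 frame.
 */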

static struct sk_buff *
ieee80211_make_monitor_skb(struct ieee80211_local *local,
			   struct sk_buff **origskb,
			   struct ieee80211_rate *rate,
			   int rtap_space, bool use_origskb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
	int rt_hdrlen, needed_headroom;
	struct sk_buff *skb;

	/* room for the radiotap header based on driver features */
	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
	needed_headroom = rt_hdrlen - rtap_space;

	if (use_origskb) {
		/* only need to expand headroom if necessary */
		skb = *origskb;
		*origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);

		if (!skb)
			return NULL;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	return skb;
}
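
/*
 * Note: use_origskb is only true when the frame exists purely for
 * monitoring and this is the last monitor interface in the list (see the
 * caller below), in which case the original skb can be consumed directly
 * instead of being copied.
 */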

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	struct sk_buff *monskb = NULL;
	int present_fcs_len = 0;
	unsigned int rtap_space = 0;
	struct ieee80211_sub_if_data *monitor_sdata =
		rcu_dereference(local->monitor_sdata);
	bool only_monitor = false;
	unsigned int min_head_len;

	if (status->flag & RX_FLAG_RADIOTAP_HE)
		rtap_space += sizeof(struct ieee80211_radiotap_he);

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
		rtap_space += sizeof(struct ieee80211_radiotap_he_mu);

	if (status->flag & RX_FLAG_RADIOTAP_LSIG)
		rtap_space += sizeof(struct ieee80211_radiotap_lsig);

	if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
		struct ieee80211_vendor_radiotap *rtap =
			(void *)(origskb->data + rtap_space);

		rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
	}
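
	/*
	 * Note: at this point rtap_space is the total amount of
	 * radiotap-style data the driver already placed in front of the
	 * 802.11 frame (HE, HE-MU, L-SIG and/or vendor data); it has to be
	 * skipped when looking at the frame and stripped before the frame
	 * is handed on.
	 */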
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) min_head_len = rtap_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * First, we may need to make a copy of the skb because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * (1) we need to modify it for radiotap (if not present), and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * (2) the other RX handlers will modify the skb we got.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * We don't need to, of course, if we aren't going to return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * the SKB because it has a bad FCS/PLCP checksum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (!(status->flag & RX_FLAG_NO_PSDU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /* driver bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) dev_kfree_skb(origskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) present_fcs_len = FCS_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* also consider the hdr->frame_control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) min_head_len += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /* ensure that the expected data elements are in skb head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (!pskb_may_pull(origskb, min_head_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) dev_kfree_skb(origskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (only_monitor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) dev_kfree_skb(origskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return ieee80211_clean_skb(origskb, present_fcs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) rtap_space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
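/*
 * Hand a copy of the frame to every monitor interface.  The monitor skb
 * is built only once; intermediate interfaces get clones and the last
 * interface consumes the skb itself to avoid an unnecessary copy.
 */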
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) bool last_monitor = list_is_last(&sdata->u.mntr.list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) &local->mon_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (!monskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) monskb = ieee80211_make_monitor_skb(local, &origskb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) rate, rtap_space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) only_monitor &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) last_monitor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (monskb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (last_monitor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) skb = monskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) monskb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) skb = skb_clone(monskb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) skb->dev = sdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) ieee80211_rx_stats(skb->dev, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (last_monitor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /* this happens if last_monitor was erroneously false */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) dev_kfree_skb(monskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) /* ditto */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (!origskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) int tid, seqno_idx, security_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* does the frame have a qos control field? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (ieee80211_is_data_qos(hdr->frame_control)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u8 *qc = ieee80211_get_qos_ctl(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* frame has qos control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) status->rx_flags |= IEEE80211_RX_AMSDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) seqno_idx = tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) security_idx = tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * Sequence numbers for management frames, QoS data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * frames with a broadcast/multicast address in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * Address 1 field, and all non-QoS data frames sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * by QoS STAs are assigned using an additional single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * modulo-4096 counter, [...]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * We also use that counter for non-QoS STAs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) seqno_idx = IEEE80211_NUM_TIDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) security_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (ieee80211_is_mgmt(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) security_idx = IEEE80211_NUM_TIDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) tid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) rx->seqno_idx = seqno_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) rx->security_idx = security_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /* Set skb->priority to the 802.1d tag (the TID) if the highest order bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)  * of the TID is not set; for now, set skb->priority to 0 for other cases. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) rx->skb->priority = (tid > 7) ? 0 : tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * DOC: Packet alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * Drivers always need to pass packets that are aligned to two-byte boundaries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * to the stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * Additionally, drivers should, if possible, align the payload data in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * way that guarantees that the contained IP header is aligned to a four-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * boundary. In the case of regular frames, this simply means aligning the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * payload to a four-byte boundary (because either the IP header is directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * contained, or IV/RFC1042 headers that have a length divisible by four are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * in front of it). If the payload data is not properly aligned and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * architecture doesn't support efficient unaligned operations, mac80211
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * will align the data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * With A-MSDU frames, however, the payload data address must be two modulo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * four (offset by two bytes from a four-byte boundary), because the 14-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * 802.3 headers within the A-MSDU subframes push the IP header back onto a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * four-byte boundary. Thankfully, the specs were sane enough this time around
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * to require padding each A-MSDU subframe to a length that is a multiple of four.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * Padding such as that added by Atheros hardware between the 802.11 header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * and the payload is not supported; in that case the driver is required to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * move the 802.11 header so that it sits directly in front of the payload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* rx handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (is_multicast_ether_addr(hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return ieee80211_is_robust_mgmt_frame(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!is_multicast_ether_addr(hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return ieee80211_is_robust_mgmt_frame(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct ieee80211_mmie *mmie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct ieee80211_mmie_16 *mmie16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (!ieee80211_is_robust_mgmt_frame(skb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) !ieee80211_is_beacon(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return -1; /* not a robust management frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
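/*
 * The MMIE is appended as the last element of a BIP-protected frame;
 * check both the 8-octet-MIC layout and the 16-octet-MIC layout.
 */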
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) mmie = (struct ieee80211_mmie *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) (skb->data + skb->len - sizeof(*mmie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (mmie->element_id == WLAN_EID_MMIE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) mmie->length == sizeof(*mmie) - 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return le16_to_cpu(mmie->key_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) mmie16 = (struct ieee80211_mmie_16 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) (skb->data + skb->len - sizeof(*mmie16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (skb->len >= 24 + sizeof(*mmie16) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) mmie16->element_id == WLAN_EID_MMIE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) mmie16->length == sizeof(*mmie16) - 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return le16_to_cpu(mmie16->key_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static int ieee80211_get_keyid(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) const struct ieee80211_cipher_scheme *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) __le16 fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) int hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int minlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) u8 key_idx_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) u8 key_idx_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) u8 keyid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) fc = hdr->frame_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) hdrlen = ieee80211_hdrlen(fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (cs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) minlen = hdrlen + cs->hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) key_idx_off = hdrlen + cs->key_idx_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) key_idx_shift = cs->key_idx_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* WEP, TKIP, CCMP and GCMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) minlen = hdrlen + IEEE80211_WEP_IV_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) key_idx_off = hdrlen + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) key_idx_shift = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (unlikely(skb->len < minlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) skb_copy_bits(skb, key_idx_off, &keyid, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
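/* extract the key index bits: bits 6-7 of that octet for the standard
 * ciphers, or whatever mask/shift the HW cipher scheme specifies */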
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) keyid &= cs->key_idx_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) keyid >>= key_idx_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /* cs could use more than the usual two bits for the keyid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (unlikely(keyid >= NUM_DEFAULT_KEYS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return keyid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) char *dev_addr = rx->sdata->vif.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
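/*
 * Mesh data sanity checks: group-addressed data must be From-DS only,
 * and frames that carry our own address as the source (addr3, or addr4
 * for 4-address frames) have looped back to us and are dropped.
 */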
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (ieee80211_is_data(hdr->frame_control)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (is_multicast_ether_addr(hdr->addr1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (ieee80211_has_tods(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) !ieee80211_has_fromds(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (ether_addr_equal(hdr->addr3, dev_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (!ieee80211_has_a4(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (ether_addr_equal(hdr->addr4, dev_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /* If there is no established peer link and this is not a peer link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)  * establishment frame, beacon or probe, drop the frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct ieee80211_mgmt *mgmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (!ieee80211_is_mgmt(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (ieee80211_is_action(hdr->frame_control)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) u8 category;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* make sure category field is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) mgmt = (struct ieee80211_mgmt *)hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) category = mgmt->u.action.category;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (category != WLAN_CATEGORY_MESH_ACTION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) category != WLAN_CATEGORY_SELF_PROTECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (ieee80211_is_probe_req(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ieee80211_is_probe_resp(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) ieee80211_is_beacon(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ieee80211_is_auth(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
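/*
 * A reorder slot is "ready" for release when the driver reported the
 * frame as filtered, or when it holds a complete A-MSDU, i.e. the last
 * queued part no longer has the RX_FLAG_AMSDU_MORE flag set.
 */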
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct sk_buff *tail = skb_peek_tail(frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) struct ieee80211_rx_status *status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (!tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) status = IEEE80211_SKB_RXCB(tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (status->flag & RX_FLAG_AMSDU_MORE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct tid_ampdu_rx *tid_agg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) struct sk_buff_head *frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct ieee80211_rx_status *status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) lockdep_assert_held(&tid_agg_rx->reorder_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (skb_queue_empty(skb_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) goto no_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
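/* an incomplete A-MSDU in this slot cannot be released, just purge it */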
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) __skb_queue_purge(skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) goto no_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /* release frames from the reorder ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) tid_agg_rx->stored_mpdu_num--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) while ((skb = __skb_dequeue(skb_list))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) status = IEEE80211_SKB_RXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) __skb_queue_tail(frames, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) no_frame:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct tid_ampdu_rx *tid_agg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) u16 head_seq_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct sk_buff_head *frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) lockdep_assert_held(&tid_agg_rx->reorder_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * the skb was added to the buffer longer than this time ago, the earlier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * frames that have not yet been received are assumed to be lost and the skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * can be released for processing. This may also release other skb's from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * reorder buffer if there are no additional gaps between the frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * Callers must hold tid_agg_rx->reorder_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
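/* i.e. about 100 ms, independent of the configured HZ */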
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct tid_ampdu_rx *tid_agg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct sk_buff_head *frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) int index, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) lockdep_assert_held(&tid_agg_rx->reorder_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /* release frames from the buffer up to the next missing frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) tid_agg_rx->stored_mpdu_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * No buffers ready to be released, but check whether any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * frames in the reorder buffer have timed out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) int skipped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) j = (j + 1) % tid_agg_rx->buf_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) skipped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (skipped &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) !time_after(jiffies, tid_agg_rx->reorder_time[j] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) HT_RX_REORDER_BUF_TIMEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) goto set_release_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* don't leave incomplete A-MSDUs around */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) i = (i + 1) % tid_agg_rx->buf_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) __skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) ht_dbg_ratelimited(sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) "release an RX reorder frame due to timeout on earlier frames\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * Increment the head seq# also for the skipped slots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) tid_agg_rx->head_seq_num =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) (tid_agg_rx->head_seq_num +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) skipped) & IEEE80211_SN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) skipped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (tid_agg_rx->stored_mpdu_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) for (; j != (index - 1) % tid_agg_rx->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) j = (j + 1) % tid_agg_rx->buf_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) set_release_timer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (!tid_agg_rx->removed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) mod_timer(&tid_agg_rx->reorder_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) tid_agg_rx->reorder_time[j] + 1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) HT_RX_REORDER_BUF_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) del_timer(&tid_agg_rx->reorder_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * As this function belongs to the RX path it must be under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * rcu_read_lock protection. It returns false if the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * can be processed immediately, true if it was consumed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) struct tid_ampdu_rx *tid_agg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) struct sk_buff_head *frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) u16 sc = le16_to_cpu(hdr->seq_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) u16 head_seq_num, buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) bool ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) spin_lock(&tid_agg_rx->reorder_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * Offloaded BA sessions have no known starting sequence number, so pick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * one from the first Rx'ed frame for this TID after the BA session was started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (unlikely(tid_agg_rx->auto_seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) tid_agg_rx->auto_seq = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) tid_agg_rx->ssn = mpdu_seq_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) tid_agg_rx->head_seq_num = mpdu_seq_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) buf_size = tid_agg_rx->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) head_seq_num = tid_agg_rx->head_seq_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * If the current MPDU's SN is smaller than the SSN, it shouldn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * be reordered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (unlikely(!tid_agg_rx->started)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) tid_agg_rx->started = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /* frame with out of date sequence number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * If the frame's sequence number exceeds our buffering window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * size, release some previous frames to make room for this one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) head_seq_num = ieee80211_sn_inc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) ieee80211_sn_sub(mpdu_seq_num, buf_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /* release stored frames up to new head to stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) ieee80211_release_reorder_frames(sdata, tid_agg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) head_seq_num, frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
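/*
 * Illustrative example: with head_seq_num 100 and buf_size 64 the window
 * covers SNs 100..163; receiving SN 170 moves the head to 170 - 64 + 1 =
 * 107, releasing (or skipping) everything up to SN 106 first.
 */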
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /* Now the new frame is always in the range of the reordering buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) index = mpdu_seq_num % tid_agg_rx->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* check if we already stored this frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * If the current MPDU is in the right order and nothing else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * is stored we can process it directly, no need to buffer it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * If it is first but there's something stored, we may be able
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * to release frames after this one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) tid_agg_rx->stored_mpdu_num == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (!(status->flag & RX_FLAG_AMSDU_MORE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) tid_agg_rx->head_seq_num =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) ieee80211_sn_inc(tid_agg_rx->head_seq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) /* put the frame in the reordering buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) tid_agg_rx->reorder_time[index] = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) tid_agg_rx->stored_mpdu_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) spin_unlock(&tid_agg_rx->reorder_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * processed immediately are added to the 'frames' queue; others stay buffered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) struct sk_buff_head *frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) struct sk_buff *skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct ieee80211_local *local = rx->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) struct sta_info *sta = rx->sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct tid_ampdu_rx *tid_agg_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) u16 sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) u8 tid, ack_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (!ieee80211_is_data_qos(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) is_multicast_ether_addr(hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) goto dont_reorder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * filter the QoS data rx stream according to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * STA/TID and check if this STA/TID is on aggregation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (!sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) goto dont_reorder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) ack_policy = *ieee80211_get_qos_ctl(hdr) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) IEEE80211_QOS_CTL_ACK_POLICY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) tid = ieee80211_get_tid(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (!tid_agg_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) WLAN_BACK_RECIPIENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) WLAN_REASON_QSTA_REQUIRE_SETUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) goto dont_reorder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) /* qos null data frames are excluded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) goto dont_reorder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /* not part of a BA session */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) goto dont_reorder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /* new, potentially un-ordered, ampdu frame - process it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* reset session timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (tid_agg_rx->timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) tid_agg_rx->last_rx = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /* if this mpdu is fragmented - terminate rx aggregation session */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) sc = le16_to_cpu(hdr->seq_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (sc & IEEE80211_SCTL_FRAG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) skb_queue_tail(&rx->sdata->skb_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) ieee80211_queue_work(&local->hw, &rx->sdata->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * No locking needed -- we will only ever process one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * RX packet at a time, and thus own tid_agg_rx. All
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * other code manipulating it needs to (and does) make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * sure that we cannot get to it any more before doing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * anything with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) frames))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) dont_reorder:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) __skb_queue_tail(frames, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (status->flag & RX_FLAG_DUP_VALIDATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * Drop duplicate 802.11 retransmissions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (rx->skb->len < 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (ieee80211_is_ctl(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) ieee80211_is_any_nullfunc(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) is_multicast_ether_addr(hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (!rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
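/*
 * A duplicate is a retransmission (Retry bit set) whose sequence control
 * field matches the last one cached for this STA and seqno index.
 */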
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) rx->sta->rx_stats.num_duplicates++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /* Drop disallowed frame classes based on STA auth/assoc state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * IEEE 802.11, Chap 5.5.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) * mac80211 filters only based on association state, i.e. it drops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * Class 3 frames from not associated stations. hostapd sends
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) * deauth/disassoc frames when needed. In addition, hostapd is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * responsible for filtering on both auth and assoc states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (ieee80211_vif_is_mesh(&rx->sdata->vif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) return ieee80211_rx_mesh_check(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (unlikely((ieee80211_is_data(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) ieee80211_is_pspoll(hdr->frame_control)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * accept port control frames from the AP even when it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * yet marked ASSOC to prevent a race where we don't set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) * assoc bit quickly enough before it sends the first frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) ieee80211_is_data_present(hdr->frame_control)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) unsigned int hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) __be16 ethertype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) hdrlen = ieee80211_hdrlen(hdr->frame_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (rx->skb->len < hdrlen + 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (ethertype == rx->sdata->control_port_protocol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) cfg80211_rx_spurious_frame(rx->sdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) hdr->addr2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) GFP_ATOMIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) struct ieee80211_local *local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) local = rx->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (!local->pspolling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (!ieee80211_has_fromds(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) /* this is not from AP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (!ieee80211_is_data(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (!ieee80211_has_moredata(hdr->frame_control)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) /* AP has no more frames buffered for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) local->pspolling = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) /* more data bit is set, let's request a new frame from the AP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) ieee80211_send_pspoll(local, rx->sdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static void sta_ps_start(struct sta_info *sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) struct ieee80211_sub_if_data *sdata = sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct ps_data *ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) int tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) ps = &sdata->bss->ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) atomic_inc(&ps->num_sta_ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) set_sta_flag(sta, WLAN_STA_PS_STA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) sta->sta.addr, sta->sta.aid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) ieee80211_clear_fast_xmit(sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (!sta->sta.txq[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
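/* The driver uses TXQs: take this station's queues off the TXQ
 * scheduling lists and remember which TIDs still have frames queued,
 * so they can be released once the station wakes up or polls.
 */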
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) struct ieee80211_txq *txq = sta->sta.txq[tid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) struct txq_info *txqi = to_txq_info(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) spin_lock(&local->active_txq_lock[txq->ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (!list_empty(&txqi->schedule_order))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) list_del_init(&txqi->schedule_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) spin_unlock(&local->active_txq_lock[txq->ac]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (txq_has_queue(txq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) set_bit(tid, &sta->txq_buffered_tids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) clear_bit(tid, &sta->txq_buffered_tids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) static void sta_ps_end(struct sta_info *sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) sta->sta.addr, sta->sta.aid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) * Clear the flag only if the other one is still set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * so that the TX path won't start transmitting new frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * directly. In the case that the driver flag isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * set, ieee80211_sta_ps_deliver_wakeup() will clear it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) clear_sta_flag(sta, WLAN_STA_PS_STA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) sta->sta.addr, sta->sta.aid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) set_sta_flag(sta, WLAN_STA_PS_DELIVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) clear_sta_flag(sta, WLAN_STA_PS_STA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) ieee80211_sta_ps_deliver_wakeup(sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
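/* For drivers that handle station powersave themselves (AP_LINK_PS):
 * report a station entering or leaving powersave so that mac80211 can
 * update its own buffering state. Returns -EINVAL if the requested
 * state is already set.
 */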
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) bool in_ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /* Don't let the same PS state be set twice */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if ((start && in_ps) || (!start && !in_ps))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) sta_ps_start(sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) sta_ps_end(sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) EXPORT_SYMBOL(ieee80211_sta_ps_transition);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (test_sta_flag(sta, WLAN_STA_SP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) ieee80211_sta_ps_deliver_poll_response(sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) set_sta_flag(sta, WLAN_STA_PSPOLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) EXPORT_SYMBOL(ieee80211_sta_pspoll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) int ac = ieee80211_ac_from_tid(tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * If this AC is not trigger-enabled do nothing unless the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * driver is calling us after it already checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * NB: This could/should check a separate bitmap of trigger-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * enabled queues, but for now we only implement uAPSD w/o
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) * TSPEC changes to the ACs, so they're always the same.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) tid != IEEE80211_NUM_TIDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) /* if we are in a service period, do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (test_sta_flag(sta, WLAN_STA_SP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) ieee80211_sta_ps_deliver_uapsd(sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) set_sta_flag(sta, WLAN_STA_UAPSD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) struct ieee80211_hdr *hdr = (void *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (!rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (sdata->vif.type != NL80211_IFTYPE_AP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * The device handles station powersave, so don't do anything about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * uAPSD and PS-Poll frames (the latter shouldn't even be passed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * up from the device to mac80211, since it handles them.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * Don't do anything if the station isn't already asleep. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) * the uAPSD case, the station will probably be marked asleep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) * in the PS-Poll case the station must be confused ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) ieee80211_sta_pspoll(&rx->sta->sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) /* Free PS Poll skb here instead of returning RX_DROP that would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) * count as a dropped frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) dev_kfree_skb(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) ieee80211_has_pm(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) (ieee80211_is_data_qos(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) ieee80211_is_qos_nullfunc(hdr->frame_control))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) u8 tid = ieee80211_get_tid(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) struct sta_info *sta = rx->sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) struct sk_buff *skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (!sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) * Update last_rx only for IBSS packets which are for the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) * BSSID and for stations already AUTHORIZED to avoid keeping the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * current IBSS network alive in cases where other STAs start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * using a different BSSID. This will also give the station another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * chance to restart the authentication/authorization in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) * something went wrong the first time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) NL80211_IFTYPE_ADHOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) sta->rx_stats.last_rx = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (ieee80211_is_data(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) !is_multicast_ether_addr(hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) sta->rx_stats.last_rate =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) sta_stats_encode_rate(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) sta->rx_stats.last_rx = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) !is_multicast_ether_addr(hdr->addr1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * Mesh beacons will update last_rx if they are found to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * match the current local configuration when processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) sta->rx_stats.last_rx = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (ieee80211_is_data(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) sta->rx_stats.last_rate = sta_stats_encode_rate(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) sta->rx_stats.fragments++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) u64_stats_update_begin(&rx->sta->rx_stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) sta->rx_stats.bytes += rx->skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) u64_stats_update_end(&rx->sta->rx_stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
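/* ewma_signal_add() works on unsigned values, so the (typically
 * negative dBm) signal is stored negated; readers negate it back.
 */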
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) sta->rx_stats.last_signal = status->signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (status->chains) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) sta->rx_stats.chains = status->chains;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) int signal = status->chain_signal[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (!(status->chains & BIT(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) sta->rx_stats.chain_signal_last[i] = signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) -signal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (ieee80211_is_s1g_beacon(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * Change STA power saving mode only at the end of a frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) * exchange sequence, and only for a data or management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * frame as specified in IEEE 802.11-2016 11.2.3.2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) !ieee80211_has_morefrags(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) !is_multicast_ether_addr(hdr->addr1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) (ieee80211_is_mgmt(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) ieee80211_is_data(hdr->frame_control)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) if (!ieee80211_has_pm(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) sta_ps_end(sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (ieee80211_has_pm(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) sta_ps_start(sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) /* mesh power save support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (ieee80211_vif_is_mesh(&rx->sdata->vif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) ieee80211_mps_rx_h_sta_process(sta, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) * Drop (qos-)data::nullfunc frames silently, since they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * are used only to control station power saving mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * If we receive a 4-addr nullfunc frame from a STA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * that was not moved to a 4-addr STA vlan yet, send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * the event to userspace; for older hostapd, drop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * the frame to the monitor interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (ieee80211_has_a4(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) !rx->sdata->u.vlan.sta))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) cfg80211_rx_unexpected_4addr_frame(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) rx->sdata->dev, sta->sta.addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * Update counter and free packet here to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * counting this as a dropped packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) sta->rx_stats.packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) dev_kfree_skb(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) } /* ieee80211_rx_h_sta_process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) static struct ieee80211_key *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) struct ieee80211_key *key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) int idx2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) /* Make sure key gets set if either BIGTK key index is set so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) * Beacon frames and Beacon frames that claim to use another BIGTK key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) * index (i.e., a key that we do not have).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
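/* The two BIGTK slots directly follow the IGTK slots: try the
 * requested index (or the first BIGTK slot if none was given) and
 * then fall back to the other one.
 */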
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (idx < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) idx2 = idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) idx2 = idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) idx2 = idx - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) key = rcu_dereference(rx->sta->gtk[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (!key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) key = rcu_dereference(sdata->keys[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (!key && rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) key = rcu_dereference(rx->sta->gtk[idx2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (!key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) key = rcu_dereference(sdata->keys[idx2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) return key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) struct sk_buff *skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) int keyidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) ieee80211_rx_result result = RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) struct ieee80211_key *sta_ptk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) struct ieee80211_key *ptk_idx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) int mmie_keyidx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) __le16 fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) const struct ieee80211_cipher_scheme *cs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (ieee80211_is_ext(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * Key selection 101
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * There are five types of keys:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * - GTK (group keys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * - IGTK (group keys for management frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * - BIGTK (group keys for Beacon frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * - PTK (pairwise keys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * - STK (station-to-station pairwise keys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * When selecting a key, we have to distinguish between multicast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) * (including broadcast) and unicast frames; the latter can only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) * use PTKs and STKs while the former always use GTKs, IGTKs, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) * then unicast frames can also use key indices like GTKs. Hence, if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) * don't have a PTK/STK we check the key index for a WEP key.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * Note that in a regular BSS, multicast frames are sent by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) * AP only, associated stations unicast the frame to the AP first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * which then multicasts it on their behalf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * There is also a slight problem in IBSS mode: GTKs are negotiated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) * with each station, which is something we don't currently handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * The spec seems to expect that one negotiates the same key with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) * every station but there's no such requirement; VLANs could be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) /* start without a key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) rx->key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) fc = hdr->frame_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (rx->sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) int keyid = rx->sta->ptk_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (ieee80211_has_protected(fc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) !(status->flag & RX_FLAG_IV_STRIPPED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) cs = rx->sta->cipher_scheme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) keyid = ieee80211_get_keyid(rx->skb, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (unlikely(keyid < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) ptk_idx = rcu_dereference(rx->sta->ptk[keyid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) if (!ieee80211_has_protected(fc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) rx->key = ptk_idx ? ptk_idx : sta_ptk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) if ((status->flag & RX_FLAG_DECRYPTED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) (status->flag & RX_FLAG_IV_STRIPPED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) /* Skip decryption if the frame is not protected. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (!ieee80211_has_protected(fc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* Broadcast/multicast robust management frame / BIP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if ((status->flag & RX_FLAG_DECRYPTED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) (status->flag & RX_FLAG_IV_STRIPPED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) NUM_DEFAULT_BEACON_KEYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) return RX_DROP_MONITOR; /* unexpected BIP keyidx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (!rx->key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) return RX_CONTINUE; /* Beacon protection not in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) } else if (mmie_keyidx >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) /* Broadcast/multicast robust management frame / BIP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if ((status->flag & RX_FLAG_DECRYPTED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) (status->flag & RX_FLAG_IV_STRIPPED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (mmie_keyidx < NUM_DEFAULT_KEYS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) return RX_DROP_MONITOR; /* unexpected BIP keyidx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (rx->sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (ieee80211_is_group_privacy_action(skb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) test_sta_flag(rx->sta, WLAN_STA_MFP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (!rx->key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) } else if (!ieee80211_has_protected(fc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * The frame was not protected, so skip decryption. However, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * need to set rx->key if there is a key that could have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * used so that the frame may be dropped if encryption would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) * have been expected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) struct ieee80211_key *key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) if (ieee80211_is_beacon(fc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) key = ieee80211_rx_get_bigtk(rx, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) } else if (ieee80211_is_mgmt(fc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) is_multicast_ether_addr(hdr->addr1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) key = rcu_dereference(rx->sdata->default_mgmt_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) if (rx->sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) key = rcu_dereference(rx->sta->gtk[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (!key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) key = rcu_dereference(sdata->keys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) if (key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) rx->key = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) * The device doesn't give us the IV so we won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * able to look up the key. That's OK though; we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * don't need to decrypt the frame, we just won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * be able to keep the statistics accurate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * Except for key threshold notifications, should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * we somehow allow the driver to tell us which key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * the hardware used if this flag is set?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if ((status->flag & RX_FLAG_DECRYPTED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) (status->flag & RX_FLAG_IV_STRIPPED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) keyidx = ieee80211_get_keyid(rx->skb, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (unlikely(keyidx < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) /* check per-station GTK first, if multicast packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) /* if not found, try default key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) if (!rx->key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * RSNA-protected unicast frames should always be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) * sent with pairwise or station-to-station keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * but for WEP we allow using a key index as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (rx->key &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) !is_multicast_ether_addr(hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) rx->key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (rx->key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) /* TODO: add threshold stuff again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
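/* Hand off to the cipher-specific handler: WEP/TKIP/CCMP/GCMP
 * decrypt and verify the payload, the CMAC/GMAC (BIP) cases only
 * validate the MMIE of robust management frames, and anything else
 * is left to the hardware / cipher-scheme handler.
 */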
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) switch (rx->key->conf.cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) case WLAN_CIPHER_SUITE_WEP40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) case WLAN_CIPHER_SUITE_WEP104:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) result = ieee80211_crypto_wep_decrypt(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) case WLAN_CIPHER_SUITE_TKIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) result = ieee80211_crypto_tkip_decrypt(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) case WLAN_CIPHER_SUITE_CCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) result = ieee80211_crypto_ccmp_decrypt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) rx, IEEE80211_CCMP_MIC_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) case WLAN_CIPHER_SUITE_CCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) result = ieee80211_crypto_ccmp_decrypt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) rx, IEEE80211_CCMP_256_MIC_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) case WLAN_CIPHER_SUITE_AES_CMAC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) result = ieee80211_crypto_aes_cmac_decrypt(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) case WLAN_CIPHER_SUITE_BIP_CMAC_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) case WLAN_CIPHER_SUITE_BIP_GMAC_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) case WLAN_CIPHER_SUITE_BIP_GMAC_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) result = ieee80211_crypto_aes_gmac_decrypt(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) case WLAN_CIPHER_SUITE_GCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) case WLAN_CIPHER_SUITE_GCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) result = ieee80211_crypto_gcmp_decrypt(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) result = ieee80211_crypto_hw_decrypt(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) /* the hdr variable is invalid after the decrypt handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) /* either the frame has been decrypted or will be dropped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) status->flag |= RX_FLAG_DECRYPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) skb->data, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) skb_queue_head_init(&cache->entries[i].skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) __skb_queue_purge(&cache->entries[i].skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) static inline struct ieee80211_fragment_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) unsigned int frag, unsigned int seq, int rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) struct sk_buff **skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) struct ieee80211_fragment_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
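/* Reuse the next slot of the small round-robin cache; any partially
 * reassembled frame still sitting in it is discarded.
 */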
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) entry = &cache->entries[cache->next++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) if (cache->next >= IEEE80211_FRAGMENT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) cache->next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) __skb_queue_purge(&entry->skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) entry->first_frag_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) entry->seq = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) entry->rx_queue = rx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) entry->last_frag = frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) entry->check_sequential_pn = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) entry->extra_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) static inline struct ieee80211_fragment_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) unsigned int frag, unsigned int seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) int rx_queue, struct ieee80211_hdr *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) struct ieee80211_fragment_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) int i, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
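/* Walk the cache from the most recently used entry backwards, looking
 * for an in-progress frame that this fragment continues.
 */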
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) idx = cache->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) struct ieee80211_hdr *f_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) struct sk_buff *f_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) idx--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) idx = IEEE80211_FRAGMENT_MAX - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) entry = &cache->entries[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) entry->rx_queue != rx_queue ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) entry->last_frag + 1 != frag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) f_skb = __skb_peek(&entry->skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) f_hdr = (struct ieee80211_hdr *) f_skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) * Check ftype and addresses are equal, else check next fragment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (((hdr->frame_control ^ f_hdr->frame_control) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) !ether_addr_equal(hdr->addr2, f_hdr->addr2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
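/* Entries older than roughly two seconds have timed out; purge them
 * rather than reassembling stale fragments.
 */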
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) __skb_queue_purge(&entry->skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
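/* CCMP and GCMP require fragments of a frame to carry strictly
 * incrementing packet numbers, so defragmentation must verify the PNs
 * (see the handler below).
 */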
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) return rx->key &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) ieee80211_has_protected(fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) u16 sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) __le16 fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) unsigned int frag, seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) struct ieee80211_fragment_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) hdr = (struct ieee80211_hdr *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) fc = hdr->frame_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) sc = le16_to_cpu(hdr->seq_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) frag = sc & IEEE80211_SCTL_FRAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) cache = &rx->sta->frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if (is_multicast_ether_addr(hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) I802_DEBUG_INC(rx->local->rx_handlers_fragments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (skb_linearize(rx->skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) * skb_linearize() might change the skb->data and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * previously cached variables (in this case, hdr) need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * be refreshed with the new data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) hdr = (struct ieee80211_hdr *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (frag == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) /* This is the first fragment of a new frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) entry = ieee80211_reassemble_add(cache, frag, seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) rx->seqno_idx, &(rx->skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (requires_sequential_pn(rx, fc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) int queue = rx->security_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) /* Store CCMP/GCMP PN so that we can verify that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) * next fragment has a sequential PN value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) entry->check_sequential_pn = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) entry->is_protected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) entry->key_color = rx->key->color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) memcpy(entry->last_pn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) rx->key->u.ccmp.rx_pn[queue],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) IEEE80211_CCMP_PN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) BUILD_BUG_ON(offsetof(struct ieee80211_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) u.ccmp.rx_pn) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) offsetof(struct ieee80211_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) u.gcmp.rx_pn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) sizeof(rx->key->u.gcmp.rx_pn[queue]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) IEEE80211_GCMP_PN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) } else if (rx->key &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) (ieee80211_has_protected(fc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) (status->flag & RX_FLAG_DECRYPTED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) entry->is_protected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) entry->key_color = rx->key->color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) /* This is a fragment for a frame that should already be pending in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) * fragment cache. Add this fragment to the end of the pending entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) entry = ieee80211_reassemble_find(cache, frag, seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) rx->seqno_idx, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) if (!entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) /* "The receiver shall discard MSDUs and MMPDUs whose constituent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) * MPDU PN values are not incrementing in steps of 1."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (entry->check_sequential_pn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (!requires_sequential_pn(rx, fc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) /* Prevent mixed key and fragment cache attacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (entry->key_color != rx->key->color)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) pn[i]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) if (pn[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
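/* pn now holds last_pn + 1, computed as a big-endian increment
 * with carry (e.g. ...:00:ff becomes ...:01:00); the received
 * fragment's PN must match it exactly
 */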
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) rpn = rx->ccm_gcm.pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) } else if (entry->is_protected &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) (!rx->key ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) (!ieee80211_has_protected(fc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) !(status->flag & RX_FLAG_DECRYPTED)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) rx->key->color != entry->key_color)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) /* Drop this as a mixed key or fragment cache attack, even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) * though for TKIP the Michael MIC should protect us, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) * WEP is a lost cause anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) } else if (entry->is_protected && rx->key &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) entry->key_color != rx->key->color &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) (status->flag & RX_FLAG_DECRYPTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) skb_pull(rx->skb, ieee80211_hdrlen(fc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) __skb_queue_tail(&entry->skb_list, rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) entry->last_frag = frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) entry->extra_len += rx->skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (ieee80211_has_morefrags(fc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) rx->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) rx->skb = __skb_dequeue(&entry->skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (skb_tailroom(rx->skb) < entry->extra_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) GFP_ATOMIC))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) __skb_queue_purge(&entry->skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) while ((skb = __skb_dequeue(&entry->skb_list))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) skb_put_data(rx->skb, skb->data, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) ieee80211_led_rx(rx->local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) rx->sta->rx_stats.packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) struct ieee80211_hdr *hdr = (void *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) struct sk_buff *skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) * Pass through unencrypted frames if the hardware has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) * decrypted them already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) if (status->flag & RX_FLAG_DECRYPTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) /* check mesh EAPOL frames first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) ieee80211_is_data(fc))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) struct ieee80211s_hdr *mesh_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) u16 hdr_len = ieee80211_hdrlen(fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) u16 ethertype_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) __be16 ethertype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) goto drop_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) /* make sure fixed part of mesh header is there, also checks skb len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) if (!pskb_may_pull(rx->skb, hdr_len + 6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) goto drop_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
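/* the ethertype follows the 802.11 header, the variable-length
 * mesh header and the 6-byte RFC 1042 LLC/SNAP header
 */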
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) sizeof(rfc1042_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) ethertype == rx->sdata->control_port_protocol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) drop_check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) /* Drop unencrypted frames if key is set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (unlikely(!ieee80211_has_protected(fc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) !ieee80211_is_any_nullfunc(fc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) ieee80211_is_data(fc) && rx->key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) __le16 fc = hdr->frame_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) * Pass through unencrypted frames if the hardware has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) * decrypted them already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) if (status->flag & RX_FLAG_DECRYPTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (unlikely(!ieee80211_has_protected(fc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) rx->key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) if (ieee80211_is_deauth(fc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) ieee80211_is_disassoc(fc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) rx->skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) rx->skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) /* BIP does not use Protected field, so need to check MMIE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (ieee80211_is_deauth(fc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) ieee80211_is_disassoc(fc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) rx->skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) rx->skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) rx->skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) rx->skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) * When using MFP, Action frames are not allowed prior to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) * having configured keys.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) if (unlikely(ieee80211_is_action(fc) && !rx->key &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) ieee80211_is_robust_mgmt_frame(rx->skb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) bool check_port_control = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) struct ethhdr *ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) *port_control = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) if (ieee80211_has_a4(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) if (sdata->vif.type == NL80211_IFTYPE_STATION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) if (!sdata->u.mgd.use_4addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) check_port_control = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) if (is_multicast_ether_addr(hdr->addr1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) ehdr = (struct ethhdr *) rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) if (ehdr->h_proto == rx->sdata->control_port_protocol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) *port_control = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) else if (check_port_control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) * requires that rx->skb is a frame with ethernet header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) * Allow EAPOL frames to us/the PAE group address regardless of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) * whether the frame was encrypted or not, and always disallow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) * all other destination addresses for them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) ether_addr_equal(ehdr->h_dest, pae_group_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) if (ieee80211_802_1x_port_control(rx) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) ieee80211_drop_unencrypted(rx, fc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) struct net_device *dev = sdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
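/* EAPOL frames (and, unless disabled, pre-auth frames) are handed
 * to userspace over nl80211 rather than through the netdev when
 * control port over nl80211 was requested for this interface
 */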
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) if (unlikely((skb->protocol == sdata->control_port_protocol ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) !sdata->control_port_no_preauth)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) sdata->control_port_over_nl80211)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) cfg80211_rx_control_port(dev, skb, noencrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) struct ethhdr *ehdr = (void *)skb_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) memset(skb->cb, 0, sizeof(skb->cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) * 802.1X over 802.11 requires that the authenticator address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) * be used for EAPOL frames. However, 802.1X allows the use of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) * the PAE group address instead. If the interface is part of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) * a bridge and we pass the frame with the PAE group address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) * then the bridge will forward it to the network (even if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) * client was not associated yet), which isn't supposed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) * happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) * To avoid that, rewrite the destination address to our own
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) * address, so that the authenticator (e.g. hostapd) will see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) * the frame, but bridge won't forward it anywhere else. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) * that due to earlier filtering, the only other address can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) * be the PAE group address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) if (unlikely(skb->protocol == sdata->control_port_protocol &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) /* deliver to local stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) if (rx->list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) list_add_tail(&skb->list, rx->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) * requires that rx->skb is a frame with ethernet header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) struct net_device *dev = sdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) struct sk_buff *skb, *xmit_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) struct sta_info *dsta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) xmit_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) ieee80211_rx_stats(dev, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) if (rx->sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) /* The seqno index has the same property as needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) * for non-QoS-data frames. Here we know it's a data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) * frame, so count MSDUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) u64_stats_update_begin(&rx->sta->rx_stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) rx->sta->rx_stats.msdu[rx->seqno_idx]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) u64_stats_update_end(&rx->sta->rx_stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) if ((sdata->vif.type == NL80211_IFTYPE_AP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) ehdr->h_proto != rx->sdata->control_port_protocol &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) if (is_multicast_ether_addr(ehdr->h_dest) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) ieee80211_vif_get_num_mcast_if(sdata) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) * send multicast frames both to higher layers in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) * local net stack and back to the wireless medium
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) xmit_skb = skb_copy(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) if (!xmit_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) net_info_ratelimited("%s: failed to clone multicast frame\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) } else if (!is_multicast_ether_addr(ehdr->h_dest) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) dsta = sta_info_get(sdata, ehdr->h_dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) if (dsta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) * The destination station is associated to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) * this AP (in this VLAN), so send the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) * directly to it and do not pass it to local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) * net stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) xmit_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) /* 'align' will only take the values 0 or 2 here since all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) * frames are required to be aligned to 2-byte boundaries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) * when being passed to mac80211; the code here works just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) * as well if that isn't true, but mac80211 assumes it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) * access fields as 2-byte aligned (e.g. for ether_addr_equal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) int align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
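/* shifting the payload down by 'align' bytes below makes the
 * network header that follows the 14-byte ethernet header
 * 4-byte aligned
 */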
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) if (align) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) if (WARN_ON(skb_headroom(skb) < 3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) u8 *data = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) size_t len = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) skb->data -= align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) memmove(skb->data, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) skb_set_tail_pointer(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) ieee80211_deliver_skb_to_local_stack(skb, rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) if (xmit_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) * Send to wireless media and increase priority by 256 to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) * keep the received priority instead of reclassifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) * the frame (see cfg80211_classify8021d).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) */
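/* (cfg80211_classify8021d() maps priorities in the 256..263 range
 * straight back to the 802.1d values 0..7 instead of reclassifying
 * the frame from its DSCP value)
 */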
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) xmit_skb->priority += 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) xmit_skb->protocol = htons(ETH_P_802_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) skb_reset_network_header(xmit_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) skb_reset_mac_header(xmit_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) dev_queue_xmit(xmit_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) struct net_device *dev = rx->sdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) struct sk_buff *skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) __le16 fc = hdr->frame_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) struct sk_buff_head frame_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) struct ethhdr ethhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) check_da = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) check_sa = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) } else switch (rx->sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) case NL80211_IFTYPE_AP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) check_da = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) case NL80211_IFTYPE_STATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) if (!rx->sta ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) check_sa = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) case NL80211_IFTYPE_MESH_POINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) check_sa = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) __skb_queue_head_init(&frame_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) rx->sdata->vif.addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) rx->sdata->vif.type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) data_offset, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
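/* a non-NULL check_da / check_sa makes ieee80211_amsdu_to_8023s()
 * validate each inner subframe's DA / SA against them and skip
 * subframes that do not match (A-MSDU address spoofing mitigation)
 */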
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) rx->sdata->vif.type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) rx->local->hw.extra_tx_headroom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) check_da, check_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) while (!skb_queue_empty(&frame_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) rx->skb = __skb_dequeue(&frame_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) if (!ieee80211_frame_allowed(rx, fc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) dev_kfree_skb(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) ieee80211_deliver_skb(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) struct sk_buff *skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) __le16 fc = hdr->frame_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) if (!(status->rx_flags & IEEE80211_RX_AMSDU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) if (unlikely(!ieee80211_is_data(fc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) if (unlikely(!ieee80211_is_data_present(fc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) switch (rx->sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) if (!rx->sdata->u.vlan.sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) case NL80211_IFTYPE_STATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) if (!rx->sdata->u.mgd.use_4addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) if (is_multicast_ether_addr(hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) if (rx->key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) * We should not receive A-MSDUs on pre-HT connections,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) * and HT connections cannot use old ciphers. Thus drop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) * them, as in those cases we couldn't even have SPP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) * A-MSDUs or such.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) switch (rx->key->conf.cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) case WLAN_CIPHER_SUITE_WEP40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) case WLAN_CIPHER_SUITE_WEP104:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) case WLAN_CIPHER_SUITE_TKIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) return __ieee80211_rx_h_amsdu(rx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) #ifdef CONFIG_MAC80211_MESH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) static ieee80211_rx_result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) struct ieee80211_hdr *fwd_hdr, *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) struct ieee80211_tx_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) struct ieee80211s_hdr *mesh_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) struct sk_buff *skb = rx->skb, *fwd_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) struct ieee80211_local *local = rx->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) u16 ac, q, hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) int tailroom = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) hdrlen = ieee80211_hdrlen(hdr->frame_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) /* make sure fixed part of mesh header is there, also checks skb len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) if (!pskb_may_pull(rx->skb, hdrlen + 6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) /* make sure full mesh header is there, also checks skb len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) if (!pskb_may_pull(rx->skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) /* reload pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) hdr = (struct ieee80211_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) /* frame is in RMC, don't forward */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) if (ieee80211_is_data(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) is_multicast_ether_addr(hdr->addr1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (!ieee80211_is_data(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) if (!mesh_hdr->ttl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) if (mesh_hdr->flags & MESH_FLAGS_AE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) struct mesh_path *mppath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) char *proxied_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) char *mpp_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) if (is_multicast_ether_addr(hdr->addr1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) mpp_addr = hdr->addr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) proxied_addr = mesh_hdr->eaddr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) MESH_FLAGS_AE_A5_A6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) /* has_a4 already checked in ieee80211_rx_mesh_check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) mpp_addr = hdr->addr4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) proxied_addr = mesh_hdr->eaddr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) mppath = mpp_path_lookup(sdata, proxied_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (!mppath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) mpp_path_add(sdata, proxied_addr, mpp_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) spin_lock_bh(&mppath->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (!ether_addr_equal(mppath->mpp, mpp_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) mppath->exp_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) spin_unlock_bh(&mppath->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) /* Frame has reached destination. Don't forward */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) if (!is_multicast_ether_addr(hdr->addr1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) ether_addr_equal(sdata->vif.addr, hdr->addr3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) ac = ieee802_1d_to_ac[skb->priority];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) q = sdata->vif.hw_queue[ac];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) if (ieee80211_queue_stopped(&local->hw, q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) skb_set_queue_mapping(skb, ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) if (!--mesh_hdr->ttl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) if (!is_multicast_ether_addr(hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) dropped_frames_ttl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) if (!ifmsh->mshcfg.dot11MeshForwarding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) if (sdata->crypto_tx_tailroom_needed_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) tailroom = IEEE80211_ENCRYPT_TAILROOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) fwd_skb = skb_copy_expand(skb, local->tx_headroom +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) sdata->encrypt_headroom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) tailroom, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) if (!fwd_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) info = IEEE80211_SKB_CB(fwd_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) memset(info, 0, sizeof(*info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) info->control.vif = &rx->sdata->vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) info->control.jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) if (is_multicast_ether_addr(fwd_hdr->addr1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) /* update power mode indication when forwarding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) /* mesh power mode flags updated in mesh_nexthop_lookup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) /* unable to resolve next hop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) fwd_hdr->addr3, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) WLAN_REASON_MESH_PATH_NOFORWARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) fwd_hdr->addr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) kfree_skb(fwd_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) ieee80211_add_pending_skb(local, fwd_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) if (is_multicast_ether_addr(hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) struct ieee80211_local *local = rx->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) struct net_device *dev = sdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) __le16 fc = hdr->frame_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) bool port_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) if (unlikely(!ieee80211_is_data(hdr->frame_control)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) * Send unexpected-4addr-frame event to hostapd. For older versions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) * also drop the frame to cooked monitor interfaces.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) if (ieee80211_has_a4(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) sdata->vif.type == NL80211_IFTYPE_AP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) if (rx->sta &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) cfg80211_rx_unexpected_4addr_frame(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) err = __ieee80211_data_to_8023(rx, &port_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) if (!ieee80211_frame_allowed(rx, fc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) /* directly handle TDLS channel switch requests/responses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) cpu_to_be16(ETH_P_TDLS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) if (pskb_may_pull(rx->skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) offsetof(struct ieee80211_tdls_data, u)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) tf->category == WLAN_CATEGORY_TDLS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) schedule_work(&local->tdls_chsw_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) rx->sta->rx_stats.packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) unlikely(port_control) && sdata->bss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) u.ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) dev = sdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) rx->sdata = sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) rx->skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) !is_multicast_ether_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) ((struct ethhdr *)rx->skb->data)->h_dest) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) (!local->scanning &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) mod_timer(&local->dynamic_ps_timer, jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) ieee80211_deliver_skb(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) struct sk_buff *skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) struct tid_ampdu_rx *tid_agg_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) u16 start_seq_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) u16 tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) if (likely(!ieee80211_is_ctl(bar->frame_control)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) if (ieee80211_is_back_req(bar->frame_control)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) __le16 control, start_seq_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) } __packed bar_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) struct ieee80211_event event = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) .type = BAR_RX_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) if (!rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) &bar_data, sizeof(bar_data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082)
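/* the TID (TID_INFO) is carried in the top four bits, bits 12-15,
 * of the BAR Control field
 */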
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) tid = le16_to_cpu(bar_data.control) >> 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) WLAN_BACK_RECIPIENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) WLAN_REASON_QSTA_REQUIRE_SETUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) if (!tid_agg_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
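/* the starting sequence number occupies bits 4-15 of the SSC
 * field; the low four bits carry the fragment number
 */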
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) event.u.ba.tid = tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) event.u.ba.ssn = start_seq_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) event.u.ba.sta = &rx->sta->sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) /* reset session timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) if (tid_agg_rx->timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) mod_timer(&tid_agg_rx->session_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) TU_TO_EXP_TIME(tid_agg_rx->timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) spin_lock(&tid_agg_rx->reorder_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) /* release stored frames up to start of BAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) start_seq_num, frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) spin_unlock(&tid_agg_rx->reorder_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) drv_event_callback(rx->local, rx->sdata, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) * After this point, we only want management frames,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) * so we can drop all remaining control frames to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) * cooked monitor interfaces.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
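/*
 * Respond to an SA Query request (part of management frame protection,
 * IEEE 802.11w): after validating the addresses and length, transmit an
 * SA Query response that echoes the request's transaction ID.
 */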
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) struct ieee80211_mgmt *mgmt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) struct ieee80211_mgmt *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) /* Not addressed to our own unicast address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) /* Not from the current AP or not associated yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) /* SA Query request frame too short */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) skb_reserve(skb, local->hw.extra_tx_headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) resp = skb_put_zero(skb, 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) memcpy(resp->da, mgmt->sa, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) IEEE80211_STYPE_ACTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) memcpy(resp->u.action.u.sa_query.trans_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) mgmt->u.action.u.sa_query.trans_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) WLAN_SA_QUERY_TR_ID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) ieee80211_tx_skb(sdata, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169)
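/*
 * Basic sanity checks for management frames: enforce the minimum header
 * length, report beacons from other BSSes seen on AP interfaces to
 * cfg80211, and drop unencrypted robust management frames when
 * management frame protection is in use.
 */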
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) if (ieee80211_is_s1g_beacon(mgmt->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) * From here on, look only at management frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) * Data and control frames are already handled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) * and unknown (reserved) frames are useless.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) if (rx->skb->len < 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) if (!ieee80211_is_mgmt(mgmt->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) ieee80211_is_beacon(mgmt->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) int sig = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) sig = status->signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) rx->skb->data, rx->skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) ieee80211_rx_status_to_khz(status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) sig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) rx->flags |= IEEE80211_RX_BEACON_REPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) if (ieee80211_drop_unencrypted_mgmt(rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
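/*
 * Action frame handler: after verifying the category-specific minimum
 * length, HT SMPS and channel-width notifications are applied directly,
 * spectrum measurement requests are handled in place, and most other
 * recognised categories (public, block-ack, VHT, spectrum management,
 * self-protected, mesh) are queued to the interface work for further
 * processing.
 */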
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) struct ieee80211_local *local = rx->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) int len = rx->skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) if (!ieee80211_is_action(mgmt->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) /* drop frames that are too small */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) if (len < IEEE80211_MIN_ACTION_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232)
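	/* dispatch on the action category; each case validates its own minimum length before acting */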
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) switch (mgmt->u.action.category) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) case WLAN_CATEGORY_HT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) /* reject HT action frames from stations not supporting HT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) if (!rx->sta->sta.ht_cap.ht_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) if (sdata->vif.type != NL80211_IFTYPE_STATION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) sdata->vif.type != NL80211_IFTYPE_AP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) sdata->vif.type != NL80211_IFTYPE_ADHOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) /* verify action & smps_control/chanwidth are present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) if (len < IEEE80211_MIN_ACTION_SIZE + 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) switch (mgmt->u.action.u.ht_smps.action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) case WLAN_HT_ACTION_SMPS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) struct ieee80211_supported_band *sband;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) enum ieee80211_smps_mode smps_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) struct sta_opmode_info sta_opmode = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) if (sdata->vif.type != NL80211_IFTYPE_AP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) goto handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) /* convert the SMPS control field to an SMPS mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) switch (mgmt->u.action.u.ht_smps.smps_control) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) case WLAN_HT_SMPS_CONTROL_DISABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) smps_mode = IEEE80211_SMPS_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) case WLAN_HT_SMPS_CONTROL_STATIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) smps_mode = IEEE80211_SMPS_STATIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) case WLAN_HT_SMPS_CONTROL_DYNAMIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) smps_mode = IEEE80211_SMPS_DYNAMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) /* if no change do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) if (rx->sta->sta.smps_mode == smps_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) goto handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) rx->sta->sta.smps_mode = smps_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) sta_opmode.smps_mode =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) ieee80211_smps_mode_to_smps_mode(smps_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) sband = rx->local->hw.wiphy->bands[status->band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) rate_control_rate_update(local, sband, rx->sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) IEEE80211_RC_SMPS_CHANGED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) cfg80211_sta_opmode_change_notify(sdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) rx->sta->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) &sta_opmode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) goto handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) struct ieee80211_supported_band *sband;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) struct sta_opmode_info sta_opmode = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) /* If it doesn't support 40 MHz it can't change ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) if (!(rx->sta->sta.ht_cap.cap &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) IEEE80211_HT_CAP_SUP_WIDTH_20_40))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) goto handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) max_bw = IEEE80211_STA_RX_BW_20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) /* set cur_max_bandwidth and recalc sta bw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) rx->sta->cur_max_bandwidth = max_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) if (rx->sta->sta.bandwidth == new_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) goto handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) rx->sta->sta.bandwidth = new_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) sband = rx->local->hw.wiphy->bands[status->band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) sta_opmode.bw =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) ieee80211_sta_rx_bw_to_chan_width(rx->sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) rate_control_rate_update(local, sband, rx->sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) IEEE80211_RC_BW_CHANGED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) cfg80211_sta_opmode_change_notify(sdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) rx->sta->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) &sta_opmode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) goto handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) case WLAN_CATEGORY_PUBLIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) if (len < IEEE80211_MIN_ACTION_SIZE + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) if (sdata->vif.type != NL80211_IFTYPE_STATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) if (!rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) if (mgmt->u.action.u.ext_chan_switch.action_code !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) WLAN_PUB_ACTION_EXT_CHANSW_ANN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) if (len < offsetof(struct ieee80211_mgmt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) u.action.u.ext_chan_switch.variable))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) goto queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) case WLAN_CATEGORY_VHT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) if (sdata->vif.type != NL80211_IFTYPE_STATION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) sdata->vif.type != NL80211_IFTYPE_AP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) sdata->vif.type != NL80211_IFTYPE_ADHOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) /* verify action code is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) if (len < IEEE80211_MIN_ACTION_SIZE + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) case WLAN_VHT_ACTION_OPMODE_NOTIF: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) /* verify opmode is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) if (len < IEEE80211_MIN_ACTION_SIZE + 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) goto queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) case WLAN_VHT_ACTION_GROUPID_MGMT: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) if (len < IEEE80211_MIN_ACTION_SIZE + 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) goto queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) case WLAN_CATEGORY_BACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) if (sdata->vif.type != NL80211_IFTYPE_STATION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) sdata->vif.type != NL80211_IFTYPE_AP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) sdata->vif.type != NL80211_IFTYPE_ADHOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) /* verify action_code is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) if (len < IEEE80211_MIN_ACTION_SIZE + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) switch (mgmt->u.action.u.addba_req.action_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) case WLAN_ACTION_ADDBA_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) if (len < (IEEE80211_MIN_ACTION_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) sizeof(mgmt->u.action.u.addba_req)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) case WLAN_ACTION_ADDBA_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) if (len < (IEEE80211_MIN_ACTION_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) sizeof(mgmt->u.action.u.addba_resp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) case WLAN_ACTION_DELBA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) if (len < (IEEE80211_MIN_ACTION_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) sizeof(mgmt->u.action.u.delba)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) goto queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) case WLAN_CATEGORY_SPECTRUM_MGMT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) /* verify action_code is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) if (len < IEEE80211_MIN_ACTION_SIZE + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) switch (mgmt->u.action.u.measurement.action_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) case WLAN_ACTION_SPCT_MSR_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) if (status->band != NL80211_BAND_5GHZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) if (len < (IEEE80211_MIN_ACTION_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) sizeof(mgmt->u.action.u.measurement)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) if (sdata->vif.type != NL80211_IFTYPE_STATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) ieee80211_process_measurement_req(sdata, mgmt, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) goto handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) case WLAN_ACTION_SPCT_CHL_SWITCH: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) u8 *bssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) if (len < (IEEE80211_MIN_ACTION_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) sizeof(mgmt->u.action.u.chan_switch)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) if (sdata->vif.type != NL80211_IFTYPE_STATION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) sdata->vif.type != NL80211_IFTYPE_ADHOC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) if (sdata->vif.type == NL80211_IFTYPE_STATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) bssid = sdata->u.mgd.bssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) bssid = sdata->u.ibss.bssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) bssid = mgmt->sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) if (!ether_addr_equal(mgmt->bssid, bssid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) goto queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) case WLAN_CATEGORY_SELF_PROTECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) if (len < (IEEE80211_MIN_ACTION_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) sizeof(mgmt->u.action.u.self_prot.action_code)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) switch (mgmt->u.action.u.self_prot.action_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) case WLAN_SP_MESH_PEERING_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) case WLAN_SP_MESH_PEERING_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) case WLAN_SP_MESH_PEERING_CONFIRM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) if (!ieee80211_vif_is_mesh(&sdata->vif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) if (sdata->u.mesh.user_mpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) /* userspace handles this frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) goto queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) case WLAN_SP_MGK_INFORM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) case WLAN_SP_MGK_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) if (!ieee80211_vif_is_mesh(&sdata->vif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) case WLAN_CATEGORY_MESH_ACTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) if (len < (IEEE80211_MIN_ACTION_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) sizeof(mgmt->u.action.u.mesh_action.action_code)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) if (!ieee80211_vif_is_mesh(&sdata->vif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) if (mesh_action_is_path_sel(mgmt) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) !mesh_path_sel_is_hwmp(sdata))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) goto queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) invalid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) /* flagged as malformed; a later handler will return it to the sender */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) handled:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) rx->sta->rx_stats.packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) dev_kfree_skb(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) skb_queue_tail(&sdata->skb_queue, rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) ieee80211_queue_work(&local->hw, &sdata->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) rx->sta->rx_stats.packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513)
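/*
 * Offer the frame to userspace listeners registered via cfg80211; if a
 * listener accepts it the frame is consumed here, otherwise it continues
 * down the handler chain.
 */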
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) int sig = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) /* skip known-bad action frames; a later handler returns them to the sender */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) * Getting here means the kernel doesn't know how to handle the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) * frame, but userspace might.  Include returned action frames as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) * well, so userspace can register for them and learn whether frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) * it transmitted were processed or bounced back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) sig = status->signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) if (cfg80211_rx_mgmt_khz(&rx->sdata->wdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) ieee80211_rx_status_to_khz(status), sig,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) rx->skb->data, rx->skb->len, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) rx->sta->rx_stats.packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) dev_kfree_skb(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546)
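/*
 * Handle action frames that userspace did not consume; currently this
 * only answers SA Query requests on station interfaces.
 */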
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) int len = rx->skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) if (!ieee80211_is_action(mgmt->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) switch (mgmt->u.action.category) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) case WLAN_CATEGORY_SA_QUERY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) if (len < (IEEE80211_MIN_ACTION_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) sizeof(mgmt->u.action.u.sa_query)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) switch (mgmt->u.action.u.sa_query.action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) case WLAN_ACTION_SA_QUERY_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) if (sdata->vif.type != NL80211_IFTYPE_STATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) ieee80211_process_sa_query_req(sdata, mgmt, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) goto handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) handled:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) rx->sta->rx_stats.packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) dev_kfree_skb(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) struct ieee80211_local *local = rx->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) struct sk_buff *nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) if (!ieee80211_is_action(mgmt->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) * For AP mode, hostapd is responsible for handling any action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) * frames that we didn't handle, including returning unknown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) * ones. For all other modes we will return them to the sender,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) * setting the 0x80 bit in the action category, as required by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) * 802.11-2012 9.24.4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) * Newer versions of hostapd also use the management frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) * registration mechanism, but older ones still rely on cooked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) * monitor interfaces, so push all such frames there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) (sdata->vif.type == NL80211_IFTYPE_AP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) if (is_multicast_ether_addr(mgmt->da))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) /* do not return rejected action frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) if (mgmt->u.action.category & 0x80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) return RX_DROP_UNUSABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) if (nskb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)
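		/* bounce a copy back to the sender: set the "rejected" bit in the category and swap the addresses */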
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) nmgmt->u.action.category |= 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) memset(nskb->cb, 0, sizeof(nskb->cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) IEEE80211_TX_CTL_NO_CCK_RATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) info->hw_queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) local->hw.offchannel_tx_hw_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) status->band);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) dev_kfree_skb(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
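/*
 * Extension frames (currently only S1G beacons) received on a station
 * interface are queued to the interface work for the MLME to process.
 */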
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) ieee80211_rx_h_ext(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) struct ieee80211_hdr *hdr = (void *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) if (!ieee80211_is_ext(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) return RX_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) if (sdata->vif.type != NL80211_IFTYPE_STATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) /* for now only beacons are ext, so queue them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) skb_queue_tail(&sdata->skb_queue, rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) ieee80211_queue_work(&rx->local->hw, &sdata->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) rx->sta->rx_stats.packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665)
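/*
 * Remaining management frames: filter the subtype against the interface
 * type, then queue the frame to the per-interface work so the MLME,
 * IBSS or mesh code can process it in process context.
 */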
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) static ieee80211_rx_result debug_noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) __le16 stype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) if (!ieee80211_vif_is_mesh(&sdata->vif) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) sdata->vif.type != NL80211_IFTYPE_ADHOC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) sdata->vif.type != NL80211_IFTYPE_OCB &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) sdata->vif.type != NL80211_IFTYPE_STATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) switch (stype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) case cpu_to_le16(IEEE80211_STYPE_AUTH):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) case cpu_to_le16(IEEE80211_STYPE_BEACON):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) /* process for all: mesh, mlme, ibss */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) if (is_multicast_ether_addr(mgmt->da) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) !is_broadcast_ether_addr(mgmt->da))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) /* process only for station/IBSS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) if (sdata->vif.type != NL80211_IFTYPE_STATION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) sdata->vif.type != NL80211_IFTYPE_ADHOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) if (is_multicast_ether_addr(mgmt->da) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) !is_broadcast_ether_addr(mgmt->da))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) /* process only for station */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) if (sdata->vif.type != NL80211_IFTYPE_STATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) /* process only for ibss and mesh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) return RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) /* queue up frame and kick off work to process it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) skb_queue_tail(&sdata->skb_queue, rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) ieee80211_queue_work(&rx->local->hw, &sdata->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) rx->sta->rx_stats.packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) return RX_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726)
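/*
 * Deliver frames the RX handlers did not consume to all cooked monitor
 * interfaces: prepend a radiotap header and hand a clone to each such
 * interface; the IEEE80211_RX_CMNTR flag ensures this runs at most once
 * per frame, and the skb is freed if no cooked monitor interface exists.
 */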
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) struct ieee80211_rate *rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) struct ieee80211_sub_if_data *sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) struct ieee80211_local *local = rx->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) struct sk_buff *skb = rx->skb, *skb2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) struct net_device *prev_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) int needed_headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) * If cooked monitor has been processed already, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) * don't do it again. If not, set the flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) if (rx->flags & IEEE80211_RX_CMNTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) goto out_free_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) rx->flags |= IEEE80211_RX_CMNTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) /* If there are no cooked monitor interfaces, just free the SKB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) if (!local->cooked_mntrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) goto out_free_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) /* any vendor radiotap data has already been removed at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) /* room for the radiotap header based on driver features */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) if (skb_headroom(skb) < needed_headroom &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) goto out_free_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) /* prepend radiotap information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) skb->pkt_type = PACKET_OTHERHOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) skb->protocol = htons(ETH_P_802_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) list_for_each_entry_rcu(sdata, &local->interfaces, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) if (!ieee80211_sdata_running(sdata))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) if (prev_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) skb2 = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) if (skb2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) skb2->dev = prev_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) netif_receive_skb(skb2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) prev_dev = sdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) ieee80211_rx_stats(sdata->dev, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) if (prev_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) skb->dev = prev_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) out_free_skb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796)
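/*
 * Turn a handler verdict into the frame's final disposition:
 * RX_DROP_MONITOR and RX_CONTINUE end up in cooked monitor delivery,
 * RX_DROP_UNUSABLE frees the frame, and RX_QUEUED means another context
 * has taken ownership of the skb.
 */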
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) ieee80211_rx_result res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) switch (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) case RX_DROP_MONITOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) rx->sta->rx_stats.dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) case RX_CONTINUE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) struct ieee80211_rate *rate = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) struct ieee80211_supported_band *sband;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) struct ieee80211_rx_status *status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) status = IEEE80211_SKB_RXCB((rx->skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) sband = rx->local->hw.wiphy->bands[status->band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) if (status->encoding == RX_ENC_LEGACY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) rate = &sband->bitrates[status->rate_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) ieee80211_rx_cooked_monitor(rx, rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) case RX_DROP_UNUSABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) if (rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) rx->sta->rx_stats.dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) dev_kfree_skb(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) case RX_QUEUED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)
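/*
 * Run the per-frame handler chain on every frame in the release queue
 * (typically frames released from the A-MPDU reorder buffer), under
 * rx_path_lock to serialise with releases triggered by the reorder timer.
 */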
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) struct sk_buff_head *frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) ieee80211_rx_result res = RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) #define CALL_RXH(rxh) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) res = rxh(rx); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) if (res != RX_CONTINUE) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) goto rxh_next; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) /* Lock here to avoid hitting all of the data used in the RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) * path (e.g. key data, station data, ...) concurrently when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) * a frame is released from the reorder buffer due to timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) * from the timer, potentially concurrently with RX from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) * driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) spin_lock_bh(&rx->local->rx_path_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) while ((skb = __skb_dequeue(frames))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) * all the other fields remain valid across frames that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) * belong to the same A-MPDU, since they share the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) * TID and come from the same station
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) rx->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) CALL_RXH(ieee80211_rx_h_check_more_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) CALL_RXH(ieee80211_rx_h_sta_process);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) CALL_RXH(ieee80211_rx_h_decrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) CALL_RXH(ieee80211_rx_h_defragment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) CALL_RXH(ieee80211_rx_h_michael_mic_verify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) /* must come after MMIC verification so the header is counted in the MPDU MIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) #ifdef CONFIG_MAC80211_MESH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) if (ieee80211_vif_is_mesh(&rx->sdata->vif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) CALL_RXH(ieee80211_rx_h_mesh_fwding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) CALL_RXH(ieee80211_rx_h_amsdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) CALL_RXH(ieee80211_rx_h_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) /* special treatment -- needs access to the frame release queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) res = ieee80211_rx_h_ctrl(rx, frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) if (res != RX_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) goto rxh_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) CALL_RXH(ieee80211_rx_h_mgmt_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) CALL_RXH(ieee80211_rx_h_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) CALL_RXH(ieee80211_rx_h_userspace_mgmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) CALL_RXH(ieee80211_rx_h_action_post_userspace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) CALL_RXH(ieee80211_rx_h_action_return);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) CALL_RXH(ieee80211_rx_h_ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) CALL_RXH(ieee80211_rx_h_mgmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) rxh_next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) ieee80211_rx_handlers_result(rx, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) #undef CALL_RXH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) spin_unlock_bh(&rx->local->rx_path_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) struct sk_buff_head reorder_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) ieee80211_rx_result res = RX_DROP_MONITOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) __skb_queue_head_init(&reorder_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) #define CALL_RXH(rxh) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) res = rxh(rx); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) if (res != RX_CONTINUE) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) goto rxh_next; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910)
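/* these two handlers run per MPDU, before A-MPDU reordering */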
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) CALL_RXH(ieee80211_rx_h_check_dup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) CALL_RXH(ieee80211_rx_h_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) ieee80211_rx_reorder_ampdu(rx, &reorder_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) ieee80211_rx_handlers(rx, &reorder_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) rxh_next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) ieee80211_rx_handlers_result(rx, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) #undef CALL_RXH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) * This function makes calls into the RX path, therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) * it has to be invoked under RCU read lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) struct sk_buff_head frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) struct ieee80211_rx_data rx = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) .sta = sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) .sdata = sta->sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) .local = sta->local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) /* This is OK -- must be QoS data frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) .security_idx = tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) .seqno_idx = tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) struct tid_ampdu_rx *tid_agg_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941)
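/* without an active RX aggregation session on this TID there is nothing to release */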
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) if (!tid_agg_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) __skb_queue_head_init(&frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) spin_lock(&tid_agg_rx->reorder_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) spin_unlock(&tid_agg_rx->reorder_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) if (!skb_queue_empty(&frames)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) struct ieee80211_event event = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) .type = BA_FRAME_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) .u.ba.tid = tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) .u.ba.sta = &sta->sta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) drv_event_callback(rx.local, rx.sdata, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) ieee80211_rx_handlers(&rx, &frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963)
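/*
 * Driver API: after the device filtered out frames within a blockack
 * session, move the reorder window to the reported ssn and mark the
 * filtered slots so the reorder logic does not keep waiting for them.
 */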
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) u16 ssn, u64 filtered,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) u16 received_mpdus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) struct tid_ampdu_rx *tid_agg_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) struct sk_buff_head frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) struct ieee80211_rx_data rx = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) /* This is OK -- must be QoS data frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) .security_idx = tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) .seqno_idx = tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) int i, diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) __skb_queue_head_init(&frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) sta = container_of(pubsta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) rx.sta = sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) rx.sdata = sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) rx.local = sta->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) if (!tid_agg_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) spin_lock_bh(&tid_agg_rx->reorder_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995)
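/* if at least half the sequence-number space worth of MPDUs was
 * received, release everything still buffered and restart the window
 * at the reported ssn; otherwise only release frames up to ssn
 */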
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) int release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) /* release all frames in the reorder buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) IEEE80211_SN_MODULO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) release, &frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) /* update ssn to match received ssn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) tid_agg_rx->head_seq_num = ssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) &frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) /* handle the case where the received ssn is behind the MAC's ssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) * it can be up to tid_agg_rx->buf_size behind and still be valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) if (diff >= tid_agg_rx->buf_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) tid_agg_rx->reorder_buf_filtered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) filtered = filtered >> diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) ssn += diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) /* update bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) for (i = 0; i < tid_agg_rx->buf_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) int index = (ssn + i) % tid_agg_rx->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) if (filtered & BIT_ULL(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) /* now process also frames that the filter marking released */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) spin_unlock_bh(&tid_agg_rx->reorder_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) ieee80211_rx_handlers(&rx, &frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) /* main receive path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044)
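/* decide, per interface type, whether this frame should be processed
 * on this interface at all (addressing and BSSID checks)
 */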
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) struct sk_buff *skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) struct ieee80211_hdr *hdr = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) bool multicast = is_multicast_ether_addr(hdr->addr1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) ieee80211_is_s1g_beacon(hdr->frame_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) switch (sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) case NL80211_IFTYPE_STATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) if (!bssid && !sdata->u.mgd.use_4addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) if (multicast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) return ether_addr_equal(sdata->vif.addr, hdr->addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) case NL80211_IFTYPE_ADHOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) if (!bssid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) !is_valid_ether_addr(hdr->addr2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) if (ieee80211_is_beacon(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) if (!multicast &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) !ether_addr_equal(sdata->vif.addr, hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) if (!rx->sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) int rate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) if (status->encoding != RX_ENC_LEGACY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) rate_idx = 0; /* TODO: HT/VHT rates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) rate_idx = status->rate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) BIT(rate_idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) case NL80211_IFTYPE_OCB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) if (!bssid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) if (!ieee80211_is_data_present(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) if (!is_broadcast_ether_addr(bssid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) if (!multicast &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) if (!rx->sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) int rate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) if (status->encoding != RX_ENC_LEGACY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) rate_idx = 0; /* TODO: HT rates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) rate_idx = status->rate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) BIT(rate_idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) case NL80211_IFTYPE_MESH_POINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) if (multicast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) return ether_addr_equal(sdata->vif.addr, hdr->addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) case NL80211_IFTYPE_AP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) if (!bssid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) return ether_addr_equal(sdata->vif.addr, hdr->addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) * Accept public action frames even when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) * BSSID doesn't match; this is used for P2P
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) * and location updates. Note that mac80211
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) * itself never looks at these frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) if (!multicast &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) !ether_addr_equal(sdata->vif.addr, hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) if (ieee80211_is_public_action(hdr, skb->len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) return ieee80211_is_beacon(hdr->frame_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) if (!ieee80211_has_tods(hdr->frame_control)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) /* ignore data frames to TDLS peers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) if (ieee80211_is_data(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) /* ignore action frames to TDLS peers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) if (ieee80211_is_action(hdr->frame_control) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) !is_broadcast_ether_addr(bssid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) !ether_addr_equal(bssid, hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) * 802.11-2016 Table 9-26 says that for data frames, A1 must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) * the BSSID - we've checked that already but may have accepted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) * the wildcard (ff:ff:ff:ff:ff:ff).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) * It also says:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) * The BSSID of the Data frame is determined as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) * a) If the STA is contained within an AP or is associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) * with an AP, the BSSID is the address currently in use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) * by the STA contained in the AP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) * So we should not accept data frames with an address that's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) * multicast.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) * Accepting it also opens a security problem because stations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) * could encrypt it with the GTK and inject traffic that way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) if (ieee80211_is_data(hdr->frame_control) && multicast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) case NL80211_IFTYPE_WDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) if (bssid || !ieee80211_is_data(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) case NL80211_IFTYPE_P2P_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) return ieee80211_is_public_action(hdr, skb->len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) ieee80211_is_probe_req(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) ieee80211_is_probe_resp(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) ieee80211_is_beacon(hdr->frame_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) case NL80211_IFTYPE_NAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) /* Currently no frames on NAN interface are allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) void ieee80211_check_fast_rx(struct sta_info *sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) struct ieee80211_sub_if_data *sdata = sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) struct ieee80211_key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) struct ieee80211_fast_rx fastrx = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) .dev = sdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) .vif_type = sdata->vif.type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) .control_port_protocol = sdata->control_port_protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) }, *old, *new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) bool assign = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) /* use sparse to check that we don't return without updating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) __acquire(check_fast_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) /* fast-rx doesn't do reordering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) goto clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212)
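/* fast-rx is only set up for station and AP/AP_VLAN interfaces; record
 * the DA/SA offsets and the expected To/From DS bits used later for the
 * 802.11 -> 802.3 header conversion
 */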
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) switch (sdata->vif.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) case NL80211_IFTYPE_STATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) if (sta->sta.tdls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) fastrx.expected_ds_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) fastrx.expected_ds_bits =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) cpu_to_le16(IEEE80211_FCTL_FROMDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) fastrx.expected_ds_bits |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) cpu_to_le16(IEEE80211_FCTL_TODS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) if (!sdata->u.mgd.powersave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) /* software powersave is a huge mess, avoid all of it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) goto clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) goto clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) case NL80211_IFTYPE_AP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) case NL80211_IFTYPE_AP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) /* parallel-rx requires this, at least with calls to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) * ieee80211_sta_ps_transition()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) goto clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) fastrx.internal_forward =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) !sdata->u.vlan.sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) sdata->u.vlan.sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) fastrx.expected_ds_bits |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) cpu_to_le16(IEEE80211_FCTL_FROMDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) fastrx.internal_forward = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) goto clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) goto clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) rcu_read_lock();
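/* prefer the current pairwise key, fall back to the interface's default unicast key */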
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) key = rcu_dereference(sta->ptk[sta->ptk_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) if (!key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) key = rcu_dereference(sdata->default_unicast_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) if (key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) switch (key->conf.cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) case WLAN_CIPHER_SUITE_TKIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) /* we don't want to deal with MMIC in fast-rx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) goto clear_rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) case WLAN_CIPHER_SUITE_CCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) case WLAN_CIPHER_SUITE_CCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) case WLAN_CIPHER_SUITE_GCMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) case WLAN_CIPHER_SUITE_GCMP_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) /* We also don't want to deal with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) * WEP or cipher schemes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) goto clear_rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) fastrx.key = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) fastrx.icv_len = key->conf.icv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) assign = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) clear_rcu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) clear:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) __release(check_fast_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) if (assign)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) spin_lock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) old = rcu_dereference_protected(sta->fast_rx, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) rcu_assign_pointer(sta->fast_rx, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) spin_unlock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) if (old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) kfree_rcu(old, rcu_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) void ieee80211_clear_fast_rx(struct sta_info *sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) struct ieee80211_fast_rx *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) spin_lock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) old = rcu_dereference_protected(sta->fast_rx, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) RCU_INIT_POINTER(sta->fast_rx, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) spin_unlock_bh(&sta->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) if (old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) kfree_rcu(old, rcu_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) struct sta_info *sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) lockdep_assert_held(&local->sta_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337)
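/* include stations on other interfaces (e.g. AP_VLANs) that share this interface's BSS */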
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) list_for_each_entry(sta, &local->sta_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) if (sdata != sta->sdata &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) ieee80211_check_fast_rx(sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) struct ieee80211_local *local = sdata->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) mutex_lock(&local->sta_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) __ieee80211_check_fast_rx_iface(sdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) mutex_unlock(&local->sta_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354)
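/*
 * Fast-rx: do the minimal per-frame checks and the 802.11 -> 802.3
 * conversion inline, bypassing the full RX handler chain. Returning
 * false punts the frame to the regular (slow) path and is only
 * allowed before the skb has been modified.
 */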
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) struct ieee80211_fast_rx *fast_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) struct sk_buff *skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) struct ieee80211_hdr *hdr = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) struct sta_info *sta = rx->sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) int orig_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) int hdrlen = ieee80211_hdrlen(hdr->frame_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) int snap_offs = hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) u8 snap[sizeof(rfc1042_header)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) __be16 proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) } *payload __aligned(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) u8 da[ETH_ALEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) u8 sa[ETH_ALEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) } addrs __aligned(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) if (fast_rx->uses_rss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) stats = this_cpu_ptr(sta->pcpu_rx_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) * to a common data structure; drivers can implement that per queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) * but we don't have that information in mac80211
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) if (!(status->flag & RX_FLAG_DUP_VALIDATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) /* If using encryption, we also need to have:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) * - PN_VALIDATED: similar, but the implementation is tricky
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) * - DECRYPTED: necessary for PN_VALIDATED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) if (fast_rx->key &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) if (unlikely(ieee80211_is_frag(hdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) /* Since our interface address cannot be multicast, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) * implicitly also rejects multicast frames without the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) * explicit check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) * We shouldn't get any *data* frames not addressed to us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) * (AP mode will accept multicast *management* frames), but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) * punting here will make it go through the full checks in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) * ieee80211_accept_frame().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412)
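/* the To/From DS bits must match what ieee80211_check_fast_rx() set up for this interface type */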
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) IEEE80211_FCTL_TODS)) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) fast_rx->expected_ds_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) /* assign the key to drop unencrypted frames (later)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) * and strip the IV/MIC if necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) /* GCMP header length is the same */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) snap_offs += IEEE80211_CCMP_HDR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) payload = (void *)(skb->data + snap_offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) /* Don't handle these here since they require special code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) * Accept AARP and IPX even though they should come with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) * bridge-tunnel header - but if we get them this way then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) * there's little point in discarding them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) payload->proto == fast_rx->control_port_protocol))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) /* after this point, don't punt to the slowpath! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446)
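/* strip the trailing MIC/ICV if the hardware left it in place */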
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) pskb_trim(skb, skb->len - fast_rx->icv_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) /* statistics part of ieee80211_rx_h_sta_process() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) stats->last_signal = status->signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) if (!fast_rx->uses_rss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) ewma_signal_add(&sta->rx_stats_avg.signal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) -status->signal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) if (status->chains) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) stats->chains = status->chains;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) int signal = status->chain_signal[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) if (!(status->chains & BIT(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) stats->chain_signal_last[i] = signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) if (!fast_rx->uses_rss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) -signal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) /* end of statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) if (rx->key && !ieee80211_has_protected(hdr->frame_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) if (status->rx_flags & IEEE80211_RX_AMSDU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) RX_QUEUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) stats->last_rx = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) stats->last_rate = sta_stats_encode_rate(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) stats->fragments++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) stats->packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) /* do the header conversion - first grab the addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) /* remove the SNAP but leave the ethertype */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) skb_pull(skb, snap_offs + sizeof(rfc1042_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) /* push the addresses in front */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) skb->dev = fast_rx->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) ieee80211_rx_stats(fast_rx->dev, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) /* The seqno index has the same property as needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) * for non-QoS-data frames. Here we know it's a data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) * frame, so count MSDUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) u64_stats_update_begin(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) stats->msdu[rx->seqno_idx]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) stats->bytes += orig_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) u64_stats_update_end(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515)
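/* AP path: bridge frames destined for another station in the BSS back
 * onto the wireless medium; multicast gets both a transmitted copy and
 * local delivery
 */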
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) if (fast_rx->internal_forward) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) struct sk_buff *xmit_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) if (is_multicast_ether_addr(addrs.da)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) xmit_skb = skb_copy(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) } else if (!ether_addr_equal(addrs.da, addrs.sa) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) sta_info_get(rx->sdata, addrs.da)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) xmit_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) if (xmit_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) * Send to wireless media and increase priority by 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) * to keep the received priority instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) * reclassifying the frame (see cfg80211_classify8021d).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) xmit_skb->priority += 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) xmit_skb->protocol = htons(ETH_P_802_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) skb_reset_network_header(xmit_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) skb_reset_mac_header(xmit_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) dev_queue_xmit(xmit_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) /* deliver to local stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) skb->protocol = eth_type_trans(skb, fast_rx->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) memset(skb->cb, 0, sizeof(skb->cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) if (rx->list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) list_add_tail(&skb->list, rx->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) stats->dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) * This function returns whether the SKB was destined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) * for RX processing, which, if consume is true, is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) * equivalent to whether the skb was consumed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) struct sk_buff *skb, bool consume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) struct ieee80211_local *local = rx->local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) struct ieee80211_sub_if_data *sdata = rx->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) rx->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) /* See if we can do fast-rx; if we have to copy we already lost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) * so punt in that case. We should never have to deliver a data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) * frame to multiple interfaces anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) * We skip the ieee80211_accept_frame() call and do the necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) * checking inside ieee80211_invoke_fast_rx().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) if (consume && rx->sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) struct ieee80211_fast_rx *fast_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) fast_rx = rcu_dereference(rx->sta->fast_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) if (!ieee80211_accept_frame(rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589)
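/* the caller still needs the skb (consume == false), so work on a copy
 * and leave the original untouched
 */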
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) if (!consume) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) skb = skb_copy(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) if (net_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) wiphy_debug(local->hw.wiphy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) "failed to copy skb for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) sdata->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) rx->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) ieee80211_invoke_rx_handlers(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) * This is the actual RX frame handler. As it belongs to the RX path it must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) * be called with rcu_read_lock protection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) struct ieee80211_sta *pubsta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) struct ieee80211_sub_if_data *sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) struct ieee80211_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) __le16 fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) struct ieee80211_rx_data rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) struct ieee80211_sub_if_data *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) struct rhlist_head *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) memset(&rx, 0, sizeof(rx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) rx.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) rx.local = local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) rx.list = list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) if (ieee80211_is_mgmt(fc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) /* drop frame if too short for header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) if (skb->len < ieee80211_hdrlen(fc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) err = skb_linearize(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) hdr = (struct ieee80211_hdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) ieee80211_parse_qos(&rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) ieee80211_verify_alignment(&rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) ieee80211_is_beacon(hdr->frame_control) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) ieee80211_is_s1g_beacon(hdr->frame_control)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) ieee80211_scan_rx(local, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657)
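/* for data frames, deliver via the transmitter's station entry when one
 * exists (possibly already supplied by the driver); otherwise fall
 * through to the per-interface matching below
 */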
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) if (ieee80211_is_data(fc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) struct sta_info *sta, *prev_sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) if (pubsta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) rx.sta = container_of(pubsta, struct sta_info, sta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) rx.sdata = rx.sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) prev_sta = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670)
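/*
 * addr2 may match station entries on more than one interface;
 * every match except the last is handled on a copy of the frame,
 * and the last match (handled after the loop) may consume the
 * original skb.
 */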
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) for_each_sta_info(local, hdr->addr2, sta, tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) if (!prev_sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) prev_sta = sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) rx.sta = prev_sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) rx.sdata = prev_sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) ieee80211_prepare_and_rx_handle(&rx, skb, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) prev_sta = sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) if (prev_sta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) rx.sta = prev_sta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) rx.sdata = prev_sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693)
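/*
 * Not a data frame, or no matching station entry was found:
 * offer the frame to every running interface except monitor
 * and AP_VLAN ones.
 */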
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) list_for_each_entry_rcu(sdata, &local->interfaces, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) if (!ieee80211_sdata_running(sdata))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) * frame is destined for this interface; deliver copies inside the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) * loop and let the last matching interface, handled after the loop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) * consume the original SKB instead of copying it once too often
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) if (!prev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) prev = sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) rx.sta = sta_info_get_bss(prev, hdr->addr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) rx.sdata = prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) ieee80211_prepare_and_rx_handle(&rx, skb, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) prev = sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) if (prev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) rx.sta = sta_info_get_bss(prev, hdr->addr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) rx.sdata = prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) * This is the receive path handler. It is called by a low level driver when an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) * 802.11 MPDU is received from the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) struct sk_buff *skb, struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) struct ieee80211_rate *rate = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) struct ieee80211_supported_band *sband;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745)
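/* this path must be called with softirqs disabled (BH context) */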
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) WARN_ON_ONCE(softirq_count() == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) if (WARN_ON(status->band >= NUM_NL80211_BANDS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) sband = local->hw.wiphy->bands[status->band];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) if (WARN_ON(!sband))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) * If we're suspending, it is possible although not too likely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) * that we'd be receiving frames after having already partially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) * quiesced the stack. We can't process such frames then since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) * that might, for example, cause stations to be added or other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) * driver callbacks to be invoked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) if (unlikely(local->quiescing || local->suspended))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) /* We might be during a HW reconfig, prevent Rx for the same reason */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) if (unlikely(local->in_reconfig))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) * The same is true when we haven't been started at all yet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) * but that case is worth a warning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) if (WARN_ON(!local->started))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) * Validate the rate, unless a PLCP error means that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) * we probably can't have a valid rate here anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) switch (status->encoding) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) case RX_ENC_HT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) * rate_idx is the MCS index, which can be in the range [0-76]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) * as documented on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) * Anything else would be some sort of driver or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) * hardware error. The driver should catch hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) * errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) if (WARN(status->rate_idx > 76,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) "Rate marked as an HT rate but status->rate_idx is not an MCS index [0-76]: %d (0x%02x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) status->rate_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) status->rate_idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) case RX_ENC_VHT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) if (WARN_ONCE(status->rate_idx > 11 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) !status->nss ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) status->nss > 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) status->rate_idx, status->nss))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) case RX_ENC_HE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) if (WARN_ONCE(status->rate_idx > 11 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) !status->nss ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) status->nss > 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) status->rate_idx, status->nss))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) case RX_ENC_LEGACY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) if (WARN_ON(status->rate_idx >= sband->n_bitrates))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) rate = &sband->bitrates[status->rate_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) status->rx_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) * Frames with a failed FCS/PLCP checksum are not returned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) * all other frames are returned without the radiotap header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) * if one was previously present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) * Also, frames with fewer than 16 bytes are dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) skb = ieee80211_rx_monitor(local, skb, rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) ieee80211_tpt_led_trig_rx(local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) ((struct ieee80211_hdr *)skb->data)->frame_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) __ieee80211_rx_handle_packet(hw, pubsta, skb, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) EXPORT_SYMBOL(ieee80211_rx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) struct sk_buff *skb, struct napi_struct *napi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) struct sk_buff *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) * key references and virtual interfaces are protected using RCU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) * which requires that we stay inside an RCU read-side critical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) * section for the whole of receive processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) ieee80211_rx_list(hw, pubsta, skb, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867)
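/*
 * without a NAPI context, hand the whole batch to the stack at once;
 * otherwise let GRO look at each frame individually
 */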
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) if (!napi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) netif_receive_skb_list(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) list_for_each_entry_safe(skb, tmp, &list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) skb_list_del_init(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) napi_gro_receive(napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) EXPORT_SYMBOL(ieee80211_rx_napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) /* This is a version of the rx handler that can be called from hard irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) * context. Post the skb on the queue and schedule the tasklet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) struct ieee80211_local *local = hw_to_local(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885)
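/* the RX status is carried in skb->cb, so it has to fit in there */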
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) skb->pkt_type = IEEE80211_RX_MSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) skb_queue_tail(&local->skb_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) tasklet_schedule(&local->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) EXPORT_SYMBOL(ieee80211_rx_irqsafe);