/*
 * Intel Wireless WiMAX Connection 2400m
 * Handle incoming traffic and deliver it to the control or data planes
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Use skb_clone(), break up processing in chunks
 *  - Split transport/device specific
 *  - Make buffer size dynamic to exert less memory pressure
 *  - RX reorder support
 *
 * This handles the RX path.
 *
 * We receive an RX message from the bus-specific driver, which
 * contains one or more payloads that have potentially different
 * destinations (the data or control paths).
 *
 * So we take the payload from the transport-specific code in the
 * form of an skb, break it up in chunks (a cloned skb for each, in
 * the case of network packets) and pass each one to netdev or to the
 * command/ack handler (and from there to the WiMAX stack).
 *
 * PROTOCOL FORMAT
 *
 * The format of the buffer is:
 *
 *   HEADER                (struct i2400m_msg_hdr)
 *   PAYLOAD DESCRIPTOR 0  (struct i2400m_pld)
 *   PAYLOAD DESCRIPTOR 1
 *   ...
 *   PAYLOAD DESCRIPTOR N
 *   PAYLOAD 0             (raw bytes)
 *   PAYLOAD 1
 *   ...
 *   PAYLOAD N
 *
 * See tx.c for a deeper description of the alignment requirements
 * and other details of this format.
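 *
 * As an illustrative sketch only (assuming the payload descriptor
 * accessors i2400m_pld_size()/i2400m_pld_type() declared in
 * i2400m.h), the buffer is walked roughly like this:
 *
 *   const struct i2400m_msg_hdr *msg_hdr = buf;
 *   unsigned i, num_pls = le16_to_cpu(msg_hdr->num_pls);
 *   for (i = 0; i < num_pls; i++) {
 *           const struct i2400m_pld *pld = &msg_hdr->pld[i];
 *           ...hand i2400m_pld_size(pld) bytes of payload to the
 *              handler selected by i2400m_pld_type(pld)...
 *   }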
 *
 * DATA PACKETS
 *
 * In firmware versions <= v1.3, data packets have no header for RX,
 * but they do for TX (currently unused).
 *
 * In firmware versions >= v1.4, RX packets have an extended header
 * (16 bytes). This header conveys information for the management of
 * host reordering of packets (the device offloads storage of the
 * packets for reordering to the host). Read below for more
 * information.
 *
 * The extended header is also used as dummy space to emulate an
 * Ethernet header, so the driver can act as an Ethernet device
 * without having to reallocate the skb.
 *
 * DATA RX REORDERING
 *
 * Starting in firmware v1.4, the device can deliver packets with
 * special reordering information; this allows it to manage packets
 * more effectively when some frames are lost on the radio link.
 *
 * Thus, for RX packets that come out of order, the device gives the
 * driver enough information to queue them properly and then, at some
 * point, the signal to deliver all (or part) of the queued packets
 * to the networking stack. There are 16 such queues.
 *
 * This only happens when a packet comes in with the "need reorder"
 * flag set in the RX header. When this bit is set, the following
 * operations might be indicated:
 *
 *   - reset queue: send all queued packets to the OS
 *
 *   - queue: queue a packet
 *
 *   - update ws: update the queue's window start and deliver queued
 *     packets that meet the criteria
 *
 *   - queue & update ws: queue a packet, update the window start and
 *     deliver queued packets that meet the criteria
 *
 * (delivery criteria: the packet's [normalized] sequence number is
 * lower than the new [normalized] window start).
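 *
 * A worked example (sequence space is mod 2048): if a queue's ws is
 * 2040 and an "update ws" to sn 5 arrives, the new normalized ws is
 * (5 - 2040) % 2048 = 13; queued packets with sn 2041..2047 and
 * 0..4 normalize to 1..12, all below 13, so all of them are
 * delivered and ws becomes 5.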
 *
 * See the i2400m_roq_*() functions for details.
 *
 * ROADMAP
 *
 * i2400m_rx
 *   i2400m_rx_msg_hdr_check
 *   i2400m_rx_pl_descr_check
 *   i2400m_rx_payload
 *     i2400m_net_rx
 *     i2400m_rx_edata
 *       i2400m_net_erx
 *       i2400m_roq_reset
 *         i2400m_net_erx
 *       i2400m_roq_queue
 *         __i2400m_roq_queue
 *       i2400m_roq_update_ws
 *         __i2400m_roq_update_ws
 *           i2400m_net_erx
 *       i2400m_roq_queue_update_ws
 *         __i2400m_roq_queue
 *         __i2400m_roq_update_ws
 *           i2400m_net_erx
 *     i2400m_rx_ctl
 *       i2400m_msg_size_check
 *       i2400m_report_hook_work    [in a workqueue]
 *         i2400m_report_hook
 *       wimax_msg_to_user
 *       i2400m_rx_ctl_ack
 *         wimax_msg_to_user_alloc
 *     i2400m_rx_trace
 *       i2400m_msg_size_check
 *       wimax_msg
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include "i2400m.h"


#define D_SUBMODULE rx
#include "debug-levels.h"

static int i2400m_rx_reorder_disabled;	/* 0 (rx reorder enabled) by default */
module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
MODULE_PARM_DESC(rx_reorder_disabled,
		 "If true, RX reordering will be disabled.");
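
/*
 * With the 0644 permissions above, the flag is also writable at
 * runtime through sysfs, e.g. (path assuming the module is named
 * i2400m):
 *
 *   echo 1 > /sys/module/i2400m/parameters/rx_reorder_disabled
 */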

struct i2400m_report_hook_args {
	struct sk_buff *skb_rx;
	const struct i2400m_l3l4_hdr *l3l4_hdr;
	size_t size;
	struct list_head list_node;
};


/*
 * Execute i2400m_report_hook in a workqueue
 *
 * Goes over the list of queued reports in i2400m->rx_reports and
 * processes them.
 *
 * NOTE: refcounts on i2400m are not needed because we flush the
 *     workqueue this runs on (i2400m->work_queue) before destroying
 *     i2400m.
 */
void i2400m_report_hook_work(struct work_struct *ws)
{
	struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_report_hook_args *args, *args_next;
	LIST_HEAD(list);
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&i2400m->rx_lock, flags);
		list_splice_init(&i2400m->rx_reports, &list);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
		if (list_empty(&list))
			break;
		else
			d_printf(1, dev, "processing queued reports\n");
		list_for_each_entry_safe(args, args_next, &list, list_node) {
			d_printf(2, dev, "processing queued report %p\n", args);
			i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
			kfree_skb(args->skb_rx);
			list_del(&args->list_node);
			kfree(args);
		}
	}
}


/*
 * Flush the list of queued reports
 */
static
void i2400m_report_hook_flush(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_report_hook_args *args, *args_next;
	LIST_HEAD(list);
	unsigned long flags;

	d_printf(1, dev, "flushing queued reports\n");
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	list_splice_init(&i2400m->rx_reports, &list);
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	list_for_each_entry_safe(args, args_next, &list, list_node) {
		d_printf(2, dev, "flushing queued report %p\n", args);
		kfree_skb(args->skb_rx);
		list_del(&args->list_node);
		kfree(args);
	}
}


/*
 * Queue a report for later processing
 *
 * @i2400m: device descriptor
 * @skb_rx: skb that contains the payload (for reference counting)
 * @l3l4_hdr: pointer to the control message's header
 * @size: size of the message
 */
static
void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
			      const void *l3l4_hdr, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;
	struct i2400m_report_hook_args *args;

	args = kzalloc(sizeof(*args), GFP_NOIO);
	if (args) {
		args->skb_rx = skb_get(skb_rx);
		args->l3l4_hdr = l3l4_hdr;
		args->size = size;
		spin_lock_irqsave(&i2400m->rx_lock, flags);
		list_add_tail(&args->list_node, &i2400m->rx_reports);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
		d_printf(2, dev, "queued report %p\n", args);
		rmb();		/* see i2400m->ready's documentation */
		if (likely(i2400m->ready))	/* only send if up */
			queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
	} else {
		if (printk_ratelimit())
			dev_err(dev, "%s:%u: Can't allocate %zu B\n",
				__func__, __LINE__, sizeof(*args));
	}
}


/*
 * Process an ack to a command
 *
 * @i2400m: device descriptor
 * @payload: pointer to message
 * @size: size of the message
 *
 * Pass the acknowledgment (in an skb) to the thread that is waiting
 * for it in i2400m->msg_completion.
 *
 * We need to coordinate properly with the thread waiting for the
 * ack. Check if it is waiting or if it is gone. We release the
 * spinlock to avoid allocating in atomic context (yeah, we could use
 * GFP_ATOMIC, but this is not so speed critical).
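 *
 * A rough sketch of the handshake (the waiting side lives in
 * i2400m_msg_to_dev(); names as used in this driver):
 *
 *   waiter:  i2400m->ack_skb = ERR_PTR(-EINPROGRESS)
 *            ...send the command...
 *            wait for i2400m->msg_completion
 *            ...consume i2400m->ack_skb...
 *   RX path: ack_skb = wimax_msg_alloc(...)
 *            i2400m->ack_skb = ack_skb
 *            complete(&i2400m->msg_completion)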
 */
static
void i2400m_rx_ctl_ack(struct i2400m *i2400m,
		       const void *payload, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	unsigned long flags;
	struct sk_buff *ack_skb;

	/* Anyone waiting for an answer? */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
		dev_err(dev, "Huh? reply to command with no waiters\n");
		goto error_no_waiter;
	}
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);

	ack_skb = wimax_msg_alloc(wimax_dev, NULL, payload, size, GFP_KERNEL);

	/* Check waiter didn't time out waiting for the answer... */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
		d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
		goto error_waiter_cancelled;
	}
	if (IS_ERR(ack_skb))
		dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
	i2400m->ack_skb = ack_skb;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	complete(&i2400m->msg_completion);
	return;

error_waiter_cancelled:
	if (!IS_ERR(ack_skb))
		kfree_skb(ack_skb);
error_no_waiter:
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
}


/*
 * Receive and process a control payload
 *
 * @i2400m: device descriptor
 * @skb_rx: skb that contains the payload (for reference counting)
 * @payload: pointer to message
 * @size: size of the message
 *
 * There are two types of control RX messages: reports (asynchronous,
 * like your everyday interrupts) and 'acks' (responses to a command,
 * get or set request).
 *
 * If it is a report, we run hooks on it (to extract information for
 * things we need to do in the driver) and then pass it over to the
 * WiMAX stack to send it to user space.
 *
 * NOTE: report processing is done in a workqueue specific to the
 *     generic driver, to avoid deadlocks in the system.
 *
 * If it is not a report, it is an ack to a previously executed
 * command, set or get, so wake up whoever is waiting for it from
 * i2400m_msg_to_dev(). i2400m_rx_ctl_ack() takes care of that.
 *
 * Note that the sizes we pass to other functions from here are the
 * sizes of the _l3l4_hdr + payload, not full buffer sizes, as we
 * have verified in _msg_size_check() that they are congruent.
 *
 * For reports: we can't clone the original skb where the data is
 * because we need to send this up via netlink; netlink has to add
 * headers and we can't overwrite what's preceding the
 * payload... as it is another message. So we just dup them.
 */
static
void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   const void *payload, size_t size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
	unsigned msg_type;

	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
	if (result < 0) {
		dev_err(dev, "HW BUG? device sent a bad message: %d\n",
			result);
		goto error_check;
	}
	msg_type = le16_to_cpu(l3l4_hdr->type);
	d_printf(1, dev, "%s 0x%04x: %zu bytes\n",
		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
		 msg_type, size);
	d_dump(2, dev, l3l4_hdr, size);
	if (msg_type & I2400M_MT_REPORT_MASK) {
		/*
		 * Process each report
		 *
		 * - has to be run serialized as well
		 *
		 * - the handling might force the execution of
		 *   commands. That might cause reentrancy issues with
		 *   bus-specific subdrivers and workqueues, so we run
		 *   it in a separate workqueue.
		 *
		 * - when the driver is not yet ready to handle them,
		 *   they are queued and at some point the queue is
		 *   restarted [NOTE: we can't queue SKBs directly, as
		 *   this might be a piece of a SKB, not the whole
		 *   thing, and this is cheaper than cloning the
		 *   SKB].
		 *
		 * Note we don't do refcounting for the device
		 * structure; this is because before destroying
		 * 'i2400m', we make sure to flush the
		 * i2400m->work_queue, so there are no issues.
		 */
		i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
		if (unlikely(i2400m->trace_msg_from_user))
			wimax_msg(&i2400m->wimax_dev, "echo",
				  l3l4_hdr, size, GFP_KERNEL);
		result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
				   GFP_KERNEL);
		if (result < 0)
			dev_err(dev, "error sending report to userspace: %d\n",
				result);
	} else		/* an ack to a CMD, GET or SET */
		i2400m_rx_ctl_ack(i2400m, payload, size);
error_check:
	return;
}


/*
 * Receive and send up a trace
 *
 * @i2400m: device descriptor
 * @payload: pointer to trace message inside the skb
 * @size: size of the message
 *
 * The i2400m might produce trace information (diagnostics) and we
 * send them through a different kernel-to-user pipe (to avoid
 * clogging it).
 *
 * As in i2400m_rx_ctl(), we can't clone the original skb where the
 * data is because we need to send this up via netlink; netlink has
 * to add headers and we can't overwrite what's preceding the
 * payload... as it is another message. So we just dup them.
 */
static
void i2400m_rx_trace(struct i2400m *i2400m,
		     const void *payload, size_t size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
	unsigned msg_type;

	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
	if (result < 0) {
		dev_err(dev, "HW BUG? device sent a bad trace message: %d\n",
			result);
		goto error_check;
	}
	msg_type = le16_to_cpu(l3l4_hdr->type);
	d_printf(1, dev, "Trace %s 0x%04x: %zu bytes\n",
		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
		 msg_type, size);
	d_dump(2, dev, l3l4_hdr, size);
	result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
	if (result < 0)
		dev_err(dev, "error sending trace to userspace: %d\n",
			result);
error_check:
	return;
}


/*
 * Reorder queue data stored on skb->cb while the skb is queued in
 * the reorder queues.
 */
struct i2400m_roq_data {
	unsigned sn;		/* Serial number for the skb */
	enum i2400m_cs cs;	/* packet type for the skb */
};


/*
 * ReOrder Queue
 *
 * @ws: Window Start; sequence number where the current window start
 *     is for this queue
 * @queue: the skb queue itself
 * @log: circular ring buffer used to log information about the
 *     reorder process in this queue that can be displayed in case of
 *     error to help diagnose it.
 *
 * This is the head for a list of skbs. The skb->cb member of each
 * skb queued here contains a 'struct i2400m_roq_data' where we store
 * the sequence number (sn) and the cs (packet type) coming from the
 * RX payload header from the device.
 */
struct i2400m_roq
{
	unsigned ws;
	struct sk_buff_head queue;
	struct i2400m_roq_log *log;
};


static
void __i2400m_roq_init(struct i2400m_roq *roq)
{
	roq->ws = 0;
	skb_queue_head_init(&roq->queue);
}


static
unsigned __i2400m_roq_index(struct i2400m *i2400m, struct i2400m_roq *roq)
{
	return ((unsigned long) roq - (unsigned long) i2400m->rx_roq)
		/ sizeof(*roq);
}


/*
 * Normalize a sequence number based on the queue's window start
 *
 * nsn = (sn - ws) % 2048
 *
 * Note that if @sn < @roq->ws we still need a positive number; in C,
 * the result of % follows the sign of the dividend, so we normalize
 * a negative result by adding 2048 to bring it back into [0, 2048).
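 *
 * E.g., with ws = 2047 and sn = 1 (the window wrapped around):
 *
 *   (1 - 2047) % 2048 = -2046, and -2046 + 2048 = 2, so nsn = 2.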
 */
static
unsigned __i2400m_roq_nsn(struct i2400m_roq *roq, unsigned sn)
{
	int r;
	r = ((int) sn - (int) roq->ws) % 2048;
	if (r < 0)
		r += 2048;
	return r;
}


/*
 * Circular buffer to keep the last N reorder operations
 *
 * In case something fails, dump them to try to reconstruct what
 * happened.
 */
enum {
	I2400M_ROQ_LOG_LENGTH = 32,
};

struct i2400m_roq_log {
	struct i2400m_roq_log_entry {
		enum i2400m_ro_type type;
		unsigned ws, count, sn, nsn, new_ws;
	} entry[I2400M_ROQ_LOG_LENGTH];
	unsigned in, out;
};
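
/*
 * Note: @in and @out are free-running counters; the ring slot for a
 * counter value c is c % I2400M_ROQ_LOG_LENGTH. When in - out equals
 * I2400M_ROQ_LOG_LENGTH, the ring is full and i2400m_roq_log_add()
 * drops the oldest entry by advancing @out.
 */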


/* Print a log entry */
static
void i2400m_roq_log_entry_print(struct i2400m *i2400m, unsigned index,
				unsigned e_index,
				struct i2400m_roq_log_entry *e)
{
	struct device *dev = i2400m_dev(i2400m);

	switch (e->type) {
	case I2400M_RO_TYPE_RESET:
		dev_err(dev, "q#%d reset ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	case I2400M_RO_TYPE_PACKET:
		dev_err(dev, "q#%d queue ws %u cnt %u sn %u/%u\n",
			index, e->ws, e->count, e->sn, e->nsn);
		break;
	case I2400M_RO_TYPE_WS:
		dev_err(dev, "q#%d update_ws ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	case I2400M_RO_TYPE_PACKET_WS:
		dev_err(dev, "q#%d queue_update_ws ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	default:
		dev_err(dev, "q#%d BUG? entry %u - unknown type %u\n",
			index, e_index, e->type);
		break;
	}
}


static
void i2400m_roq_log_add(struct i2400m *i2400m,
			struct i2400m_roq *roq, enum i2400m_ro_type type,
			unsigned ws, unsigned count, unsigned sn,
			unsigned nsn, unsigned new_ws)
{
	struct i2400m_roq_log_entry *e;
	unsigned cnt_idx;
	int index = __i2400m_roq_index(i2400m, roq);

	/* if we run out of space, we eat from the end */
	if (roq->log->in - roq->log->out == I2400M_ROQ_LOG_LENGTH)
		roq->log->out++;
	cnt_idx = roq->log->in++ % I2400M_ROQ_LOG_LENGTH;
	e = &roq->log->entry[cnt_idx];

	e->type = type;
	e->ws = ws;
	e->count = count;
	e->sn = sn;
	e->nsn = nsn;
	e->new_ws = new_ws;

	if (d_test(1))
		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
}


/* Dump all the entries in the FIFO and reinitialize it */
static
void i2400m_roq_log_dump(struct i2400m *i2400m, struct i2400m_roq *roq)
{
	unsigned cnt, cnt_idx;
	struct i2400m_roq_log_entry *e;
	int index = __i2400m_roq_index(i2400m, roq);

	BUG_ON(roq->log->out > roq->log->in);
	for (cnt = roq->log->out; cnt < roq->log->in; cnt++) {
		cnt_idx = cnt % I2400M_ROQ_LOG_LENGTH;
		e = &roq->log->entry[cnt_idx];
		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
		memset(e, 0, sizeof(*e));
	}
	roq->log->in = roq->log->out = 0;
}


/*
 * Backbone for the queuing of an skb (by normalized sequence number)
 *
 * @i2400m: device descriptor
 * @roq: reorder queue where to add
 * @skb: the skb to add
 * @sn: the sequence number of the skb
 * @nsn: the normalized sequence number of the skb (pre-computed by
 *     the caller from the @sn and @roq->ws).
 *
 * We first try a couple of quick cases:
 *
 *   - the queue is empty
 *   - the skb would be appended to the queue
 *
 * These will be the most common operations.
 *
 * If these fail, then we have to do a sorted insertion in the queue,
 * which is the slowest path.
 *
 * We don't need to take a reference on the skb, as we take ownership
 * of it.
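 *
 * E.g. (illustrative): if the queue holds skbs with nsn 1, 3 and 4,
 * an incoming skb with nsn 2 fails both fast paths and the sorted
 * walk inserts it before the nsn 3 entry.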
 */
static
void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
			struct sk_buff *skb, unsigned sn, unsigned nsn)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb_itr;
	struct i2400m_roq_data *roq_data_itr, *roq_data;
	unsigned nsn_itr;

	d_fnstart(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %u)\n",
		  i2400m, roq, skb, sn, nsn);

	roq_data = (struct i2400m_roq_data *) &skb->cb;
	BUILD_BUG_ON(sizeof(*roq_data) > sizeof(skb->cb));
	roq_data->sn = sn;
	d_printf(3, dev, "ERX: roq %p [ws %u] nsn %d sn %u\n",
		 roq, roq->ws, nsn, roq_data->sn);

	/* Queues will be empty on not-so-bad environments, so try
	 * that first */
	if (skb_queue_empty(&roq->queue)) {
		d_printf(2, dev, "ERX: roq %p - first one\n", roq);
		__skb_queue_head(&roq->queue, skb);
		goto out;
	}
	/* Now try to append, as most of the operations will be that */
	skb_itr = skb_peek_tail(&roq->queue);
	roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
	nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
	/* NSN bounds assumed correct (checked when it was queued) */
	if (nsn >= nsn_itr) {
		d_printf(2, dev, "ERX: roq %p - appended after %p (nsn %d sn %u)\n",
			 roq, skb_itr, nsn_itr, roq_data_itr->sn);
		__skb_queue_tail(&roq->queue, skb);
		goto out;
	}
	/* None of the fast-path options worked. Iterate to find the
	 * right spot where to insert the packet; we know the queue is
	 * not empty, so we are not the first ones; we also know we
	 * are not going to be the last ones. The list is sorted, so
	 * we have to insert before the first entry with an nsn_itr
	 * greater than our nsn. */
	skb_queue_walk(&roq->queue, skb_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
		/* NSN bounds assumed correct (checked when it was queued) */
		if (nsn_itr > nsn) {
			d_printf(2, dev, "ERX: roq %p - queued before %p "
				 "(nsn %d sn %u)\n", roq, skb_itr, nsn_itr,
				 roq_data_itr->sn);
			__skb_queue_before(&roq->queue, skb_itr, skb);
			goto out;
		}
	}
	/* If we get here, that is VERY bad -- print info to help
	 * diagnose and crash it */
	dev_err(dev, "SW BUG? failed to insert packet\n");
	dev_err(dev, "ERX: roq %p [ws %u] skb %p nsn %d sn %u\n",
		roq, roq->ws, skb, nsn, roq_data->sn);
	skb_queue_walk(&roq->queue, skb_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
		/* NSN bounds assumed correct (checked when it was queued) */
		dev_err(dev, "ERX: roq %p skb_itr %p nsn %d sn %u\n",
			roq, skb_itr, nsn_itr, roq_data_itr->sn);
	}
	BUG();
out:
	d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
		i2400m, roq, skb, sn, nsn);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * Backbone for the update window start operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * @i2400m: device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * @roq: Reorder queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * @sn: New sequence number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * Updates the window start of a queue; when doing so, it must deliver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * to the networking stack all the queued skb's whose normalized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * sequence number is lower than the new normalized window start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) unsigned sn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) struct device *dev = i2400m_dev(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) struct sk_buff *skb_itr, *tmp_itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct i2400m_roq_data *roq_data_itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) unsigned new_nws, nsn_itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) new_nws = __i2400m_roq_nsn(roq, sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * For type 2(update_window_start) rx messages, there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * need to check if the normalized sequence number is greater 1023.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * Simply insert and deliver all packets to the host up to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * window start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) /* NSN bounds assumed correct (checked when it was queued) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) if (nsn_itr < new_nws) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) d_printf(2, dev, "ERX: roq %p - release skb %p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) "(nsn %u/%u new nws %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) roq, skb_itr, nsn_itr, roq_data_itr->sn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) new_nws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) __skb_unlink(skb_itr, &roq->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) break; /* rest of packets all nsn_itr > nws */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) roq->ws = sn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) return new_nws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * Reset a queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * @i2400m: device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * @cin: Queue Index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * Deliver all the packets and reset the window-start to zero. Name is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * kind of misleading.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct device *dev = i2400m_dev(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct sk_buff *skb_itr, *tmp_itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) struct i2400m_roq_data *roq_data_itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) d_fnstart(2, dev, "(i2400m %p roq %p)\n", i2400m, roq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_RESET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) roq->ws, skb_queue_len(&roq->queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ~0, ~0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) d_printf(2, dev, "ERX: roq %p - release skb %p (sn %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) roq, skb_itr, roq_data_itr->sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) __skb_unlink(skb_itr, &roq->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) roq->ws = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * Queue a packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * @i2400m: device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * @cin: Queue Index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * @skb: containing the packet data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * @fbn: First block number of the packet in @skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * @lbn: Last block number of the packet in @skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * The hardware is asking the driver to queue a packet for later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * delivery to the networking stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct sk_buff * skb, unsigned lbn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct device *dev = i2400m_dev(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) unsigned nsn, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) d_fnstart(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) i2400m, roq, skb, lbn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) len = skb_queue_len(&roq->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) nsn = __i2400m_roq_nsn(roq, lbn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (unlikely(nsn >= 1024)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) nsn, lbn, roq->ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) i2400m_roq_log_dump(i2400m, roq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) i2400m_reset(i2400m, I2400M_RT_WARM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) __i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) roq->ws, len, lbn, nsn, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) i2400m, roq, skb, lbn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * Update the window start in a reorder queue and deliver all skbs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * with a lower window start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * @i2400m: device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * @roq: Reorder queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * @sn: New sequence number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) unsigned sn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct device *dev = i2400m_dev(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) unsigned old_ws, nsn, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) d_fnstart(2, dev, "(i2400m %p roq %p sn %u)\n", i2400m, roq, sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) old_ws = roq->ws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) len = skb_queue_len(&roq->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) nsn = __i2400m_roq_update_ws(i2400m, roq, sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) old_ws, len, sn, nsn, roq->ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) d_fnstart(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * Queue a packet and update the window start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * @i2400m: device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * @cin: Queue Index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * @skb: containing the packet data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * @fbn: First block number of the packet in @skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * @sn: Last block number of the packet in @skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * Note that unlike i2400m_roq_update_ws(), which sets the new window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * start to @sn, in here we'll set it to @sn + 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct sk_buff * skb, unsigned sn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct device *dev = i2400m_dev(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) unsigned nsn, old_ws, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) d_fnstart(2, dev, "(i2400m %p roq %p skb %p sn %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) i2400m, roq, skb, sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) len = skb_queue_len(&roq->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) nsn = __i2400m_roq_nsn(roq, sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * For type 3(queue_update_window_start) rx messages, there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * need to check if the normalized sequence number is greater 1023.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * Simply insert and deliver all packets to the host up to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * window start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) old_ws = roq->ws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /* If the queue is empty, don't bother as we'd queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * it and immediately unqueue it -- just deliver it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct i2400m_roq_data *roq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) roq_data = (struct i2400m_roq_data *) &skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) i2400m_net_erx(i2400m, skb, roq_data->cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) __i2400m_roq_update_ws(i2400m, roq, sn + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) old_ws, len, sn, nsn, roq->ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) i2400m, roq, skb, sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * This routine destroys the memory allocated for rx_roq, when no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * other thread is accessing it. Access to rx_roq is refcounted by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * rx_roq_refcount, hence memory allocated must be destroyed when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * rx_roq_refcount becomes zero. This routine gets executed when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * rx_roq_refcount becomes zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static void i2400m_rx_roq_destroy(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) unsigned itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct i2400m *i2400m
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) = container_of(ref, struct i2400m, rx_roq_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) __skb_queue_purge(&i2400m->rx_roq[itr].queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) kfree(i2400m->rx_roq[0].log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) kfree(i2400m->rx_roq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) i2400m->rx_roq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * Receive and send up an extended data packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * @i2400m: device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * @skb_rx: skb that contains the extended data packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * @single_last: 1 if the payload is the only one or the last one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * the skb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * @payload: pointer to the packet's data inside the skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * @size: size of the payload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * Starting in v1.4 of the i2400m's firmware, the device can send data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * packets to the host in an extended format that; this incudes a 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * byte header (struct i2400m_pl_edata_hdr). Using this header's space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * we can fake ethernet headers for ethernet device emulation without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * having to copy packets around.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * This function handles said path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * Receive and send up an extended data packet that requires no reordering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * @i2400m: device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * @skb_rx: skb that contains the extended data packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * @single_last: 1 if the payload is the only one or the last one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * the skb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * @payload: pointer to the packet's data (past the actual extended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * data payload header).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * @size: size of the payload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * Pass over to the networking stack a data packet that might have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * reordering requirements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * This needs to the decide if the skb in which the packet is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * contained can be reused or if it needs to be cloned. Then it has to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * be trimmed in the edges so that the beginning is the space for eth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * header and then pass it to i2400m_net_erx() for the stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * Assumes the caller has verified the sanity of the payload (size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * etc) already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) unsigned single_last, const void *payload, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct device *dev = i2400m_dev(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) const struct i2400m_pl_edata_hdr *hdr = payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct net_device *net_dev = i2400m->wimax_dev.net_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) enum i2400m_cs cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) u32 reorder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) unsigned ro_needed, ro_type, ro_cin, ro_sn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct i2400m_roq *roq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct i2400m_roq_data *roq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) d_fnstart(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) "size %zu)\n", i2400m, skb_rx, single_last, payload, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (size < sizeof(*hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) dev_err(dev, "ERX: HW BUG? message with short header (%zu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) "vs %zu bytes expected)\n", size, sizeof(*hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (single_last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) skb = skb_get(skb_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) d_printf(3, dev, "ERX: skb %p reusing\n", skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) skb = skb_clone(skb_rx, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) dev_err(dev, "ERX: no memory to clone skb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) net_dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) goto error_skb_clone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) d_printf(3, dev, "ERX: skb %p cloned from %p\n", skb, skb_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /* now we have to pull and trim so that the skb points to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * beginning of the IP packet; the netdev part will add the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * ethernet header as needed - we know there is enough space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * because we checked in i2400m_rx_edata(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) skb_trim(skb, (void *) skb_end_pointer(skb) - payload - sizeof(*hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) reorder = le32_to_cpu(hdr->reorder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ro_needed = reorder & I2400M_RO_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) cs = hdr->cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (ro_needed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ro_type = (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) spin_lock_irqsave(&i2400m->rx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (i2400m->rx_roq == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) kfree_skb(skb); /* rx_roq is already destroyed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) spin_unlock_irqrestore(&i2400m->rx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) roq = &i2400m->rx_roq[ro_cin];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) kref_get(&i2400m->rx_roq_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) spin_unlock_irqrestore(&i2400m->rx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) roq_data = (struct i2400m_roq_data *) &skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) roq_data->sn = ro_sn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) roq_data->cs = cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) d_printf(2, dev, "ERX: reorder needed: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) "type %u cin %u [ws %u] sn %u/%u len %zuB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ro_type, ro_cin, roq->ws, ro_sn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) __i2400m_roq_nsn(roq, ro_sn), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) d_dump(2, dev, payload, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) switch(ro_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) case I2400M_RO_TYPE_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) i2400m_roq_reset(i2400m, roq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) kfree_skb(skb); /* no data here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) case I2400M_RO_TYPE_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) i2400m_roq_queue(i2400m, roq, skb, ro_sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) case I2400M_RO_TYPE_WS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) i2400m_roq_update_ws(i2400m, roq, ro_sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) kfree_skb(skb); /* no data here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) case I2400M_RO_TYPE_PACKET_WS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) spin_lock_irqsave(&i2400m->rx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) spin_unlock_irqrestore(&i2400m->rx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) i2400m_net_erx(i2400m, skb, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) error_skb_clone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * Act on a received payload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * @i2400m: device instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * @skb_rx: skb where the transaction was received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * @single_last: 1 this is the only payload or the last one (so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * skb can be reused instead of cloned).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * @pld: payload descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * @payload: payload data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * Upon reception of a payload, look at its guts in the payload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * descriptor and decide what to do with it. If it is a single payload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * skb or if the last skb is a data packet, the skb will be referenced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * and modified (so it doesn't have to be cloned).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) unsigned single_last, const struct i2400m_pld *pld,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) const void *payload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct device *dev = i2400m_dev(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) size_t pl_size = i2400m_pld_size(pld);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) enum i2400m_pt pl_type = i2400m_pld_type(pld);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) d_printf(7, dev, "RX: received payload type %u, %zu bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) pl_type, pl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) d_dump(8, dev, payload, pl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) switch (pl_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) case I2400M_PT_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) case I2400M_PT_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) case I2400M_PT_TRACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) i2400m_rx_trace(i2400m, payload, pl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) case I2400M_PT_EDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) d_printf(3, dev, "ERX: data payload %zu bytes\n", pl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) default: /* Anything else shouldn't come to the host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) dev_err(dev, "RX: HW BUG? unexpected payload type %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) pl_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * Check a received transaction's message header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * @i2400m: device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * @msg_hdr: message header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * @buf_size: size of the received buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * Check that the declarations done by a RX buffer message header are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * sane and consistent with the amount of data that was received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) int i2400m_rx_msg_hdr_check(struct i2400m *i2400m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) const struct i2400m_msg_hdr *msg_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) size_t buf_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) int result = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct device *dev = i2400m_dev(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (buf_size < sizeof(*msg_hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) dev_err(dev, "RX: HW BUG? message with short header (%zu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) "vs %zu bytes expected)\n", buf_size, sizeof(*msg_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (msg_hdr->barker != cpu_to_le32(I2400M_D2H_MSG_BARKER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) dev_err(dev, "RX: HW BUG? message received with unknown "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) "barker 0x%08x (buf_size %zu bytes)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) le32_to_cpu(msg_hdr->barker), buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (msg_hdr->num_pls == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) dev_err(dev, "RX: HW BUG? zero payload packets in message\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (le16_to_cpu(msg_hdr->num_pls) > I2400M_MAX_PLS_IN_MSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) dev_err(dev, "RX: HW BUG? message contains more payload "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) "than maximum; ignoring.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * Check a payload descriptor against the received data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * @i2400m: device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * @pld: payload descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * @pl_itr: offset (in bytes) in the received buffer the payload is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * located
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * @buf_size: size of the received buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * Given a payload descriptor (part of a RX buffer), check it is sane
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * and that the data it declares fits in the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) int i2400m_rx_pl_descr_check(struct i2400m *i2400m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) const struct i2400m_pld *pld,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) size_t pl_itr, size_t buf_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) int result = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) struct device *dev = i2400m_dev(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) size_t pl_size = i2400m_pld_size(pld);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) enum i2400m_pt pl_type = i2400m_pld_type(pld);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (pl_size > i2400m->bus_pl_size_max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) dev_err(dev, "RX: HW BUG? payload @%zu: size %zu is "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) "bigger than maximum %zu; ignoring message\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) pl_itr, pl_size, i2400m->bus_pl_size_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (pl_itr + pl_size > buf_size) { /* enough? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) dev_err(dev, "RX: HW BUG? payload @%zu: size %zu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) "goes beyond the received buffer "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) "size (%zu bytes); ignoring message\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) pl_itr, pl_size, buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (pl_type >= I2400M_PT_ILLEGAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) dev_err(dev, "RX: HW BUG? illegal payload type %u; "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) "ignoring message\n", pl_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * i2400m_rx - Receive a buffer of data from the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * @i2400m: device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * @skb: skbuff where the data has been received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * Parse in a buffer of data that contains an RX message sent from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * device. See the file header for the format. Run all checks on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * buffer header, then run over each payload's descriptors, verify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) * their consistency and act on each payload's contents. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * everything is successful, update the device's statistics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * Note: You need to set the skb to contain only the length of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * received buffer; for that, use skb_trim(skb, RECEIVED_SIZE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * 0 if ok, < 0 errno on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * If ok, this function owns now the skb and the caller DOESN'T have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * to run kfree_skb() on it. However, on error, the caller still owns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * the skb and it is responsible for releasing it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) int i, result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) struct device *dev = i2400m_dev(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) const struct i2400m_msg_hdr *msg_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) size_t pl_itr, pl_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) unsigned num_pls, single_last, skb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) skb_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) i2400m, skb, skb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) msg_hdr = (void *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (result < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) goto error_msg_hdr_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) result = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) num_pls = le16_to_cpu(msg_hdr->num_pls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /* Check payload descriptor(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) pl_itr = struct_size(msg_hdr, pld, num_pls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (pl_itr > skb_len) { /* got all the payload descriptors? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) "%u payload descriptors (%zu each, total %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) goto error_pl_descr_short;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /* Walk each payload payload--check we really got it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) for (i = 0; i < num_pls; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /* work around old gcc warnings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) pl_itr, skb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (result < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) goto error_pl_descr_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) single_last = num_pls == 1 || i == num_pls - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) skb->data + pl_itr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) pl_itr += ALIGN(pl_size, I2400M_PL_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) cond_resched(); /* Don't monopolize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /* Update device statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) spin_lock_irqsave(&i2400m->rx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) i2400m->rx_pl_num += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (i > i2400m->rx_pl_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) i2400m->rx_pl_max = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (i < i2400m->rx_pl_min)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) i2400m->rx_pl_min = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) i2400m->rx_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) i2400m->rx_size_acc += skb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (skb_len < i2400m->rx_size_min)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) i2400m->rx_size_min = skb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (skb_len > i2400m->rx_size_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) i2400m->rx_size_max = skb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) spin_unlock_irqrestore(&i2400m->rx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) error_pl_descr_check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) error_pl_descr_short:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) error_msg_hdr_check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) i2400m, skb, skb_len, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) EXPORT_SYMBOL_GPL(i2400m_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) void i2400m_unknown_barker(struct i2400m *i2400m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) const void *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct device *dev = i2400m_dev(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) char prefix[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) const __le32 *barker = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) dev_err(dev, "RX: HW BUG? unknown barker %08x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) "dropping %zu bytes\n", le32_to_cpu(*barker), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) snprintf(prefix, sizeof(prefix), "%s %s: ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) dev_driver_string(dev), dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (size > 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 8, 4, buf, 64, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) printk(KERN_ERR "%s... (only first 64 bytes "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) "dumped)\n", prefix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 8, 4, buf, size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) EXPORT_SYMBOL(i2400m_unknown_barker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * Initialize the RX queue and infrastructure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * This sets up all the RX reordering infrastructures, which will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * be used if reordering is not enabled or if the firmware does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * support it. The device is told to do reordering in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * i2400m_dev_initialize(), where it also looks at the value of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * i2400m->rx_reorder switch before taking a decission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * Note we allocate the roq queues in one chunk and the actual logging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * support for it (logging) in another one and then we setup the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * pointers from the first to the last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int i2400m_rx_setup(struct i2400m *i2400m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) i2400m->rx_reorder = i2400m_rx_reorder_disabled? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (i2400m->rx_reorder) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) unsigned itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct i2400m_roq_log *rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) result = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) i2400m->rx_roq = kcalloc(I2400M_RO_CIN + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) sizeof(i2400m->rx_roq[0]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (i2400m->rx_roq == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) goto error_roq_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (rd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) result = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) goto error_roq_log_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) for(itr = 0; itr < I2400M_RO_CIN + 1; itr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) __i2400m_roq_init(&i2400m->rx_roq[itr]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) i2400m->rx_roq[itr].log = &rd[itr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) kref_init(&i2400m->rx_roq_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) error_roq_log_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) kfree(i2400m->rx_roq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) error_roq_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) /* Tear down the RX queue and infrastructure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) void i2400m_rx_release(struct i2400m *i2400m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (i2400m->rx_reorder) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) spin_lock_irqsave(&i2400m->rx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) spin_unlock_irqrestore(&i2400m->rx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /* at this point, nothing can be received... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) i2400m_report_hook_flush(i2400m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }