// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Wireless WiMAX Connection 2400m
 * Glue with the networking stack
 *
 * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This implements an ethernet device for the i2400m.
 *
 * We fake being an ethernet device to simplify the support from user
 * space and from the other side. The world is (sadly) configured to
 * take in only Ethernet devices...
 *
 * Because of this, when using firmwares <= v1.3, there is a
 * copy-each-rxed-packet overhead on the RX path. Each IP packet has
 * to be reallocated to add an ethernet header (as there is no space
 * in what we get from the device). This is a known drawback and
 * firmwares >= 1.4 add header space that can be used to insert the
 * ethernet header without having to reallocate and copy.
 *
 * TX error handling is tricky; because we have to FIFO/queue the
 * buffers for transmission (as the hardware likes it aggregated), we
 * just give the skb to the TX subsystem and by the time it is
 * transmitted, we have long forgotten about it. So we just don't care
 * too much about it.
 *
 * Note that when the device is in idle mode with the basestation, we
 * need to negotiate coming back up online. That involves negotiation
 * and possible user space interaction. Thus, we defer to a workqueue
 * to do all that. By default, we only queue a single packet and drop
 * the rest, as potentially the time to go back from idle to normal is
 * long.
 *
 * ROADMAP
 *
 * i2400m_open            Called on ifconfig up
 * i2400m_stop            Called on ifconfig down
 *
 * i2400m_hard_start_xmit Called by the network stack to send a packet
 *   i2400m_net_wake_tx     Wake up device from basestation-IDLE & TX
 *     i2400m_wake_tx_work
 *       i2400m_cmd_exit_idle
 *       i2400m_tx
 *   i2400m_net_tx          TX a data frame
 *     i2400m_tx
 *
 * (MTU changes are validated by the networking core against the
 *  min_mtu/max_mtu bounds set in i2400m_netdev_setup(); this driver
 *  needs no ndo_change_mtu callback.)
 *
 * i2400m_tx_timeout      Called when the device times out
 *
 * i2400m_net_rx          Called by the RX code when a data frame is
 *                        available (firmware <= 1.3)
 * i2400m_net_erx         Called by the RX code when a data frame is
 *                        available (firmware >= 1.4).
 * i2400m_netdev_setup    Called to setup all the netdev stuff from
 *                        alloc_netdev.
 */
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/export.h>
#include "i2400m.h"


#define D_SUBMODULE netdev
#include "debug-levels.h"

enum {
	/* netdev interface */
	/* 20 secs? yep, this is the maximum timeout that the device
	 * might take to get out of IDLE / negotiate it with the base
	 * station. We add 1 sec for good measure. */
	I2400M_TX_TIMEOUT = 21 * HZ,
	/*
	 * Experimentation has determined that 20 is a good value for
	 * minimizing the jitter in the throughput.
	 */
	I2400M_TX_QLEN = 20,
};
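
/*
 * Editor's note, a worked example (not in the original source): the
 * timeout is expressed in jiffies, so its wall-clock value is the
 * same regardless of the kernel's HZ setting:
 *
 *   HZ = 100:  I2400M_TX_TIMEOUT = 21 * 100  =  2100 jiffies = 21 s
 *   HZ = 1000: I2400M_TX_TIMEOUT = 21 * 1000 = 21000 jiffies = 21 s
 */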


static
int i2400m_open(struct net_device *net_dev)
{
	int result;
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
	/* Make sure we wait until init is complete... */
	mutex_lock(&i2400m->init_mutex);
	if (i2400m->updown)
		result = 0;
	else
		result = -EBUSY;
	mutex_unlock(&i2400m->init_mutex);
	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
		net_dev, i2400m, result);
	return result;
}
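
/*
 * Usage note (editor's illustration; the interface name is an
 * assumption): the stack calls i2400m_open() via .ndo_open when the
 * interface is brought up, e.g.:
 *
 *   ip link set dev wmx0 up
 *
 * It succeeds only once the driver has finished initializing and
 * marked the device up (i2400m->updown); otherwise it fails with
 * -EBUSY.
 */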


static
int i2400m_stop(struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
	i2400m_net_wake_stop(i2400m);
	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
	return 0;
}


/*
 * Wake up the device and transmit a held SKB, then restart the net queue
 *
 * When the device goes into basestation-idle mode, we need to tell it
 * to exit that mode; it will negotiate with the base station, user
 * space may have to intervene to rehandshake crypto and then tell us
 * when it is ready to transmit the packet we have "queued". Even
 * then, we need to give it some time after it reports being ok.
 *
 * On error, there is not much we can do. If the error was on TX, we
 * still wake the queue up to see if the next packet will be luckier.
 *
 * If _cmd_exit_idle() fails... well, it could be many things; most
 * commonly it is that something else took the device out of IDLE mode
 * (for example, the base station). In that case we get an -EILSEQ and
 * we just ignore it. If the device is back to connected, then fine --
 * if it is in some other state, the packet will be dropped anyway.
 */
void i2400m_wake_tx_work(struct work_struct *ws)
{
	int result;
	struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&i2400m->tx_lock, flags);
	skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
	result = -EINVAL;
	if (skb == NULL) {
		dev_err(dev, "WAKE&TX: skb disappeared!\n");
		goto out_put;
	}
	/* If we have somehow lost the connection after this was
	 * queued, don't do anything; the device might have been
	 * reset or just disconnected. */
	if (unlikely(!netif_carrier_ok(net_dev)))
		goto out_kfree;
	result = i2400m_cmd_exit_idle(i2400m);
	if (result == -EILSEQ)
		result = 0;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: device didn't get out of idle: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	result = wait_event_timeout(i2400m->state_wq,
				    i2400m->state != I2400M_SS_IDLE,
				    net_dev->watchdog_timeo - HZ/2);
	if (result == 0)
		result = -ETIMEDOUT;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	msleep(20);	/* device still needs some time or it drops it */
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
error:
	netif_wake_queue(net_dev);
out_kfree:
	kfree_skb(skb);	/* refcount transferred by _hard_start_xmit() */
out_put:
	i2400m_put(i2400m);
	d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
		ws, i2400m, skb, result);
}
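
/*
 * Editor's note on the wait above (illustrative, not in the original
 * source): wait_event_timeout() returns 0 if the timeout elapsed with
 * the condition still false, and the remaining jiffies (>= 1)
 * otherwise -- never a negative errno. Hence the mapping done in
 * i2400m_wake_tx_work():
 *
 *   result = wait_event_timeout(wq, cond, timeout);
 *   if (result == 0)
 *           result = -ETIMEDOUT;   // timed out, cond still false
 *   // result > 0: cond became true with 'result' jiffies to spare
 */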


/*
 * Prepare the data payload TX header
 *
 * The i2400m expects a 4 byte header in front of a data packet.
 *
 * Because we pretend to be an ethernet device, this packet comes with
 * an ethernet header. Pull it and push our header.
 */
static
void i2400m_tx_prep_header(struct sk_buff *skb)
{
	struct i2400m_pl_data_hdr *pl_hdr;
	skb_pull(skb, ETH_HLEN);
	pl_hdr = skb_push(skb, sizeof(*pl_hdr));
	pl_hdr->reserved = 0;
}
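
/*
 * Editor's sketch of the skb layout around the call above (the 4-byte
 * size of struct i2400m_pl_data_hdr is per the comment above):
 *
 *   before:  | eth hdr (ETH_HLEN = 14) | IP packet ... |
 *            ^ skb->data
 *   after:   | pl_hdr (4 bytes, zeroed) | IP packet ... |
 *            ^ skb->data
 *
 * skb_pull() advances skb->data past the faked ethernet header;
 * skb_push() then prepends the device's payload header in its place.
 */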


/*
 * Cleanup resources acquired during i2400m_net_wake_tx()
 *
 * This is called by __i2400m_dev_stop and means we have to make sure
 * the workqueue is flushed of any pending work.
 */
void i2400m_net_wake_stop(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *wake_tx_skb;
	unsigned long flags;

	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
	/*
	 * See i2400m_hard_start_xmit(); references are taken there and
	 * here we release them if the packet was still pending.
	 */
	cancel_work_sync(&i2400m->wake_tx_ws);

	spin_lock_irqsave(&i2400m->tx_lock, flags);
	wake_tx_skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	if (wake_tx_skb) {
		i2400m_put(i2400m);
		kfree_skb(wake_tx_skb);
	}

	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}


/*
 * TX an skb to an idle device
 *
 * When the device is in basestation-idle mode, we need to wake it up
 * and then TX. So we queue a work_struct for doing so.
 *
 * We need to get an extra ref for the skb (so it is not dropped), as
 * well as be careful not to queue more than one request (it wouldn't
 * help at all). If more than one request comes or there are errors,
 * we just drop the packets (see i2400m_hard_start_xmit()).
 */
static
int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
		       struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
	if (net_ratelimit()) {
		d_printf(3, dev, "WAKE&NETTX: "
			 "skb %p sending %d bytes to radio\n",
			 skb, skb->len);
		d_dump(4, dev, skb->data, skb->len);
	}
	/* We hold a ref count for i2400m and skb, so when
	 * stopping the device, we need to cancel that work
	 * and, if pending, release those resources. */
	result = 0;
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	if (!i2400m->wake_tx_skb) {
		netif_stop_queue(net_dev);
		i2400m_get(i2400m);
		i2400m->wake_tx_skb = skb_get(skb);	/* transfer ref count */
		i2400m_tx_prep_header(skb);
		result = schedule_work(&i2400m->wake_tx_ws);
		WARN_ON(result == 0);
	}
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	if (result == 0) {
		/* Yes, this happens even if we stopped the
		 * queue -- blame the queue disciplines that
		 * queue without looking -- I guess there is a reason
		 * for that. */
		if (net_ratelimit())
			d_printf(1, dev, "NETTX: device exiting idle, "
				 "dropping skb %p, queue running %d\n",
				 skb, netif_queue_stopped(net_dev));
		result = -EBUSY;
	}
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return result;
}
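
/*
 * Editor's summary of the single-slot wake-and-TX lifecycle implied
 * by the code above (a sketch, not new behavior):
 *
 *   i2400m_net_wake_tx():   slot empty -> stop queue, take i2400m and
 *                           skb refs, store skb, schedule wake_tx_ws
 *   i2400m_wake_tx_work():  empty the slot, exit IDLE, TX the skb,
 *                           wake the queue, drop both refs
 *   i2400m_net_wake_stop(): cancel the work; if the skb is still in
 *                           the slot, drop the refs taken here
 *
 * At most one packet is ever held; anything arriving while the slot
 * is full is dropped by i2400m_hard_start_xmit().
 */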


/*
 * Transmit a packet to the base station on behalf of the network stack.
 *
 * Returns: 0 if ok, < 0 errno code on error.
 *
 * We need to pull the ethernet header and add the hardware header,
 * which is currently set to all zeroes and reserved.
 */
static
int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
		  struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
		  i2400m, net_dev, skb);
	/* FIXME: check eth hdr, only IPv4 is routed by the device as of now */
	netif_trans_update(net_dev);
	i2400m_tx_prep_header(skb);
	d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
		 skb, skb->len);
	d_dump(4, dev, skb->data, skb->len);
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
	d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n",
		i2400m, net_dev, skb, result);
	return result;
}


/*
 * Transmit a packet to the base station on behalf of the network stack
 *
 * Returns: NETDEV_TX_OK (always, even in case of error)
 *
 * In case of error, we just drop it. Reasons:
 *
 *  - we add a hw header to each skb, and if the network stack
 *    retries, we have no way to know if that skb has it or not.
 *
 *  - network protocols have their own drop-recovery mechanisms
 *
 *  - there is not much else we can do
 *
 * If the device is idle, we need to wake it up; that is an operation
 * that will sleep. See i2400m_net_wake_tx() for details.
 */
static
netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
				   struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);
	int result = -1;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);

	if (skb_cow_head(skb, 0))
		goto drop;

	if (i2400m->state == I2400M_SS_IDLE)
		result = i2400m_net_wake_tx(i2400m, net_dev, skb);
	else
		result = i2400m_net_tx(i2400m, net_dev, skb);
	if (result < 0) {
drop:
		net_dev->stats.tx_dropped++;
	} else {
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += skb->len;
	}
	dev_kfree_skb(skb);
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return NETDEV_TX_OK;
}


static
void i2400m_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
	/*
	 * We might want to kick the device
	 *
	 * There is not much we can do though, as the device requires
	 * that we send the data aggregated. By the time we receive
	 * this, there might be data pending to be sent or not...
	 */
	net_dev->stats.tx_errors++;
}


/*
 * Create a fake ethernet header
 *
 * For emulating an ethernet device, every received IP header has to
 * be prefixed with an ethernet header. Fake it with the given
 * protocol.
 */
static
void i2400m_rx_fake_eth_header(struct net_device *net_dev,
			       void *_eth_hdr, __be16 protocol)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct ethhdr *eth_hdr = _eth_hdr;

	memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
	memcpy(eth_hdr->h_source, i2400m->src_mac_addr,
	       sizeof(eth_hdr->h_source));
	eth_hdr->h_proto = protocol;
}
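
/*
 * Editor's example of the resulting (faked) header for an IPv4 frame
 * (illustrative, not in the original source):
 *
 *   eth_hdr->h_dest   = net_dev->dev_addr      (our own address)
 *   eth_hdr->h_source = i2400m->src_mac_addr   (fabricated peer)
 *   eth_hdr->h_proto  = htons(ETH_P_IP)        (0x0800)
 *
 * so packet sniffers on the interface see well-formed ethernet
 * frames even though the air interface carries bare IP.
 */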


/*
 * i2400m_net_rx - pass a network packet to the stack
 *
 * @i2400m: device instance
 * @skb_rx: the skb where the buffer pointed to by @buf is
 * @i: 1 if the payload is the last (or only) one in the message
 * @buf: pointer to the buffer containing the data
 * @buf_len: buffer's length
 *
 * This is only used now for the v1.3 firmware. It will be deprecated
 * in >= 2.6.31.
 *
 * Note that due to firmware limitations, we don't have space to add
 * an ethernet header, so we need to copy each packet. Firmware
 * versions >= v1.4 fix this [see i2400m_net_erx()].
 *
 * If the payload is the last (or the only one) in a multi-payload
 * message, we don't copy it; we just take a reference on @skb_rx and
 * adjust its skb->data pointer and length to cover @buf. Otherwise we
 * allocate a fresh skb and copy the data into it.
 *
 * This function is normally run from a thread context. However, we
 * still use netif_rx() instead of netif_receive_skb() as was
 * recommended in the mailing list. Reason is in some stress tests
 * when sending/receiving a lot of data we seem to hit a softlock in
 * the kernel's TCP implementation [around tcp_delay_timer()]. Using
 * netif_rx() took care of the issue.
 *
 * This is, of course, still open to do more research on why running
 * with netif_receive_skb() hits this softlock. FIXME.
 *
 * FIXME: currently we make no effort to distinguish whether what we
 * got was an IPv4 or IPv6 header, so the protocol field may be set
 * incorrectly (we always assume IPv4).
 */
void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   unsigned i, const void *buf, int buf_len)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;

	d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
		  i2400m, buf, buf_len);
	if (i) {
		skb = skb_get(skb_rx);
		d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
		skb_pull(skb, buf - (void *) skb->data);
		skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
	} else {
		/* Yes, this is bad -- a lot of overhead -- see
		 * comments at the top of the file */
		skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
		if (skb == NULL) {
			dev_err(dev, "NETRX: no memory to realloc skb\n");
			net_dev->stats.rx_dropped++;
			goto error_skb_realloc;
		}
		skb_put_data(skb, buf, buf_len);
	}
	i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
				  skb->data - ETH_HLEN,
				  cpu_to_be16(ETH_P_IP));
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = i2400m->wimax_dev.net_dev;
	skb->protocol = htons(ETH_P_IP);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += buf_len;
	d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
		 buf_len);
	d_dump(4, dev, buf, buf_len);
	netif_rx_ni(skb);	/* see notes in function header */
error_skb_realloc:
	d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
		i2400m, buf, buf_len);
}


/*
 * i2400m_net_erx - pass a network packet to the stack (extended version)
 *
 * @i2400m: device descriptor
 * @skb: the skb where the packet is - the skb should be set to point
 *     at the IP packet; this function will add ethernet headers if
 *     needed.
 * @cs: packet type
 *
 * This is only used now for firmware >= v1.4. Note it is quite
 * similar to i2400m_net_rx() (used only for v1.3 firmware).
 *
 * This function is normally run from a thread context. However, we
 * still use netif_rx() instead of netif_receive_skb() as was
 * recommended in the mailing list. Reason is in some stress tests
 * when sending/receiving a lot of data we seem to hit a softlock in
 * the kernel's TCP implementation [around tcp_delay_timer()]. Using
 * netif_rx() took care of the issue.
 *
 * This is, of course, still open to do more research on why running
 * with netif_receive_skb() hits this softlock. FIXME.
 */
void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
		    enum i2400m_cs cs)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
		  i2400m, skb, skb->len, cs);
	switch(cs) {
	case I2400M_CS_IPV4_0:
	case I2400M_CS_IPV4:
		i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
					  skb->data - ETH_HLEN,
					  cpu_to_be16(ETH_P_IP));
		skb_set_mac_header(skb, -ETH_HLEN);
		skb->dev = i2400m->wimax_dev.net_dev;
		skb->protocol = htons(ETH_P_IP);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += skb->len;
		break;
	default:
		dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs);
		goto error;
	}
	d_printf(3, dev, "ERX: receiving %d bytes to the network stack\n",
		 skb->len);
	d_dump(4, dev, skb->data, skb->len);
	netif_rx_ni(skb);	/* see notes in function header */
error:
	d_fnend(2, dev, "(i2400m %p skb %p [%u] cs %d) = void\n",
		i2400m, skb, skb->len, cs);
}

static const struct net_device_ops i2400m_netdev_ops = {
	.ndo_open = i2400m_open,
	.ndo_stop = i2400m_stop,
	.ndo_start_xmit = i2400m_hard_start_xmit,
	.ndo_tx_timeout = i2400m_tx_timeout,
};

static void i2400m_get_drvinfo(struct net_device *net_dev,
			       struct ethtool_drvinfo *info)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, i2400m->fw_name ? : "",
		sizeof(info->fw_version));
	if (net_dev->dev.parent)
		strlcpy(info->bus_info, dev_name(net_dev->dev.parent),
			sizeof(info->bus_info));
}

static const struct ethtool_ops i2400m_ethtool_ops = {
	.get_drvinfo = i2400m_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
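
/*
 * Usage note (editor's illustration; the interface name is an
 * assumption): with these ops wired up, standard tooling works
 * against the device:
 *
 *   ethtool -i wmx0    # driver name, firmware version, bus info
 *                      # (via i2400m_get_drvinfo())
 *   ethtool wmx0       # reports "Link detected" via
 *                      # ethtool_op_get_link()
 */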

/**
 * i2400m_netdev_setup - Set up @net_dev's i2400m private data
 *
 * Called by alloc_netdev()
 */
void i2400m_netdev_setup(struct net_device *net_dev)
{
	d_fnstart(3, NULL, "(net_dev %p)\n", net_dev);
	ether_setup(net_dev);
	net_dev->mtu = I2400M_MAX_MTU;
	net_dev->min_mtu = 0;
	net_dev->max_mtu = I2400M_MAX_MTU;
	net_dev->tx_queue_len = I2400M_TX_QLEN;
	net_dev->features =
		NETIF_F_VLAN_CHALLENGED
		| NETIF_F_HIGHDMA;
	net_dev->flags =
		IFF_NOARP		/* i2400m is a pure IP device */
		& (~IFF_BROADCAST	/* i2400m is P2P */
		   & ~IFF_MULTICAST);
	net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
	net_dev->netdev_ops = &i2400m_netdev_ops;
	net_dev->ethtool_ops = &i2400m_ethtool_ops;
	d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
}
EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
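
/*
 * Editor's sketch (illustrative, assuming the usual alloc_netdev()
 * pattern; the real call lives in the bus-specific probe code) of
 * how a bus driver would use this setup callback:
 *
 *   struct net_device *net_dev;
 *
 *   net_dev = alloc_netdev(sizeof(struct i2400m), "wmx%d",
 *                          NET_NAME_UNKNOWN, i2400m_netdev_setup);
 *   if (net_dev == NULL)
 *           return -ENOMEM;
 *   // alloc_netdev() invokes i2400m_netdev_setup() on the new device
 */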