/*
 * Intel Wireless WiMAX Connection 2400m
 * USB RX handling
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Use skb_clone(), break up processing in chunks
 *  - Split transport/device specific
 *  - Make buffer size dynamic to exert less memory pressure
 *
 *
 * This handles the RX path on USB.
 *
 * When a notification is received that says 'there is RX data ready',
 * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
 * reads a buffer from USB and passes it to i2400m_rx() in the generic
 * handling code. The RX buffer has a specific format that is
 * described in rx.c.
 *
 * We use a kernel thread in a loop because:
 *
 * - we want to be able to call the USB power management get/put
 *   functions (blocking) before each transaction.
 *
 * - we might get a lot of notifications and we don't want to submit
 *   a zillion reads; by serializing, we are throttling.
 *
 * - RX data processing can get heavy enough that it is not
 *   appropriate to do in the USB callback; thus we run it in
 *   process context.
 *
 * We provide a read buffer of an arbitrary size (short of a page); if
 * the callback reports -EOVERFLOW, it means the buffer was too small,
 * so we just double the size and retry (being careful to append, as
 * sometimes the device provided some data). Every now and then we
 * check if the average message size is smaller than half the current
 * buffer size and, if so, we halve it. Over time, the size of the
 * preallocated buffer thus tracks the average received transaction
 * size, adapting dynamically to it.
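 *
 * An illustrative trace of that adaptation (made-up numbers, not
 * measurements): on -EOVERFLOW, a 4096 byte buffer is doubled by
 * i2400mu_rx_size_grow() to 8192; that is a multiple of 512, so it
 * is trimmed to 8184 (see "Dynamic RX size" below for why). If the
 * next 100 receptions then average, say, 3000 bytes -- less than
 * half of 8184 -- i2400mu_rx_size_maybe_shrink() halves the buffer
 * to 4092, and so on until it settles around the average.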
 *
 * ROADMAP
 *
 * i2400mu_rx_kick()		Called from notif.c when we get a
 *				'data ready' notification
 * i2400mu_rxd()		Kernel RX daemon
 *   i2400mu_rx()		Receive USB data
 *     i2400m_rx()		Send data to generic i2400m RX handling
 *
 * i2400mu_rx_setup()		called from i2400mu_bus_dev_start()
 *
 * i2400mu_rx_release()		called from i2400mu_bus_dev_stop()
 */
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "i2400m-usb.h"


#define D_SUBMODULE rx
#include "usb-debug-levels.h"

/*
 * Dynamic RX size
 *
 * We can't let the rx_size be a multiple of 512 bytes (the RX
 * endpoint's max packet size). On some USB host controllers (we
 * haven't been able to fully characterize which), if the device is
 * about to send (for example) X bytes and we only post a buffer to
 * receive n*512, it will fail to mark that as babble (so that
 * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
 * rest).
 *
 * So on growing or shrinking, if the new size is a multiple of the
 * max packet size, we remove some bytes (instead of adding some, so
 * that a buddy allocator wastes less space).
 *
 * Note we also need a hook for this on i2400mu_rx() -- when we do the
 * first read, we are sure we won't hit this spot because
 * i2400mu->rx_size has been set properly. However, if we have to
 * double because of -EOVERFLOW, when we launch the read to get the
 * rest of the data, we *have* to make sure that also is not a
 * multiple of the max_pkt_size.
 */
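
/*
 * For example (illustrative arithmetic only): growing a 4000 byte
 * buffer doubles it to 8000, which is not a multiple of 512 and is
 * used as-is; growing a 4096 byte buffer doubles it to 8192, which
 * is 16 * 512, so i2400mu_rx_size_grow() below returns 8192 - 8 =
 * 8184.
 */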

static
size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
{
	struct device *dev = &i2400mu->usb_iface->dev;
	size_t rx_size;
	const size_t max_pkt_size = 512;

	rx_size = 2 * i2400mu->rx_size;
	if (rx_size % max_pkt_size == 0) {
		rx_size -= 8;
		d_printf(1, dev,
			 "RX: expected size grew to %zu [adjusted -8] "
			 "from %zu\n",
			 rx_size, i2400mu->rx_size);
	} else
		d_printf(1, dev,
			 "RX: expected size grew to %zu from %zu\n",
			 rx_size, i2400mu->rx_size);
	return rx_size;
}


static
void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
{
	const size_t max_pkt_size = 512;
	struct device *dev = &i2400mu->usb_iface->dev;

	if (unlikely(i2400mu->rx_size_cnt >= 100
		     && i2400mu->rx_size_auto_shrink)) {
		size_t avg_rx_size =
			i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
		size_t new_rx_size = i2400mu->rx_size / 2;
		if (avg_rx_size < new_rx_size) {
			if (new_rx_size % max_pkt_size == 0) {
				new_rx_size -= 8;
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "[adjusted -8] from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			} else
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			i2400mu->rx_size = new_rx_size;
			i2400mu->rx_size_cnt = 0;
			i2400mu->rx_size_acc = i2400mu->rx_size;
		}
	}
}
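
/*
 * A quick illustration of the shrink heuristic above (hypothetical
 * numbers): with rx_size = 16384, rx_size_acc = 300000 and
 * rx_size_cnt = 100, the average message size is 3000 bytes, which
 * is below 16384 / 2 = 8192; 8192 is a multiple of 512, so it is
 * adjusted to 8184 and becomes the new rx_size. The counters are
 * then reset so the next 100 receptions are judged against the new
 * size.
 */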

/*
 * Receive a message with payloads from the USB bus into an skb
 *
 * @i2400mu: USB device descriptor
 * @rx_skb: skb where to place the received message
 *
 * Deals with all the USB-specifics of receiving, dynamically
 * increasing the buffer size if so needed. Returns the payload in the
 * skb, ready to process. On a zero-length packet, we retry.
 *
 * On soft USB errors, we retry (until they become too frequent and
 * then are promoted to hard); on hard USB errors, we reset the
 * device. On other errors (e.g., skb reallocation failure), we just
 * drop the message and hope the next invocation solves it.
 *
 * Returns: pointer to the skb if ok, ERR_PTR on error.
 *   NOTE: this function might realloc the skb (if it is too small),
 *   so always update with the one returned.
 *   ERR_PTR() is < 0 on error.
 *   Will return NULL if it cannot reallocate -- this can be
 *   considered a transient retryable error.
 */
static
struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
{
	int result = 0;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, read_size, rx_size, do_autopm;
	struct usb_endpoint_descriptor *epd;
	const size_t max_pkt_size = 512;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ?
		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		dev_err(dev, "RX: can't get autopm: %d\n", result);
		do_autopm = 0;
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
	if (unlikely(rx_size % max_pkt_size == 0)) {
		rx_size -= 8;
		d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
	}
	result = usb_bulk_msg(
		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
		rx_size, &read_size, 200);
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (read_size == 0)
			goto retry;	/* ZLP, just resubmit */
		skb_put(rx_skb, read_size);
		break;
	case -EPIPE:
		/*
		 * Stall -- maybe the device is choking on our
		 * requests. Clear it and give it some time. If they
		 * happen too often, it might be another symptom, so
		 * we reset.
		 *
		 * No error handling for usb_clear_halt(); if it
		 * works, the retry works; if it fails, this switch
		 * does the error handling for us.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "BM-CMD: too many stalls in "
				"URB; resetting device\n");
			goto do_reset;
		}
		usb_clear_halt(i2400mu->usb_dev, usb_pipe);
		msleep(10);	/* give the device some time */
		goto retry;
	case -EINVAL:			/* while removing driver */
	case -ENODEV:			/* dev disconnect ... */
	case -ENOENT:			/* just ignore it */
	case -ESHUTDOWN:
	case -ECONNRESET:
		break;
	case -EOVERFLOW: {		/* too small, reallocate */
		struct sk_buff *new_skb;
		rx_size = i2400mu_rx_size_grow(i2400mu);
		if (rx_size <= (1 << 16))	/* cap it */
			i2400mu->rx_size = rx_size;
		else {
			if (printk_ratelimit())
				dev_err(dev, "BUG? rx_size up to %d\n",
					rx_size);
			result = -EINVAL;
			goto out;
		}
		skb_put(rx_skb, read_size);
		new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
					  GFP_KERNEL);
		if (new_skb == NULL) {
			kfree_skb(rx_skb);
			rx_skb = NULL;
			goto out;	/* drop it...*/
		}
		kfree_skb(rx_skb);
		rx_skb = new_skb;
		i2400mu->rx_size_cnt = 0;
		i2400mu->rx_size_acc = i2400mu->rx_size;
		d_printf(1, dev, "RX: size changed to %d, received %d, "
			 "copied %d, capacity %ld\n",
			 rx_size, read_size, rx_skb->len,
			 (long) skb_end_offset(new_skb));
		goto retry;
	}
	/* In most cases, it happens due to the hardware scheduling a
	 * read when there was no data - unfortunately, we have no way
	 * to tell this timeout from a USB timeout. So we just ignore
	 * it. */
	case -ETIMEDOUT:
		dev_err(dev, "RX: timeout: %d\n", result);
		result = 0;
		break;
	default:			/* Any error */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
			goto error_reset;
		dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
		goto retry;
	}
out:
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
	return rx_skb;

error_reset:
	dev_err(dev, "RX: maximum errors in URB exceeded; "
		"resetting device\n");
do_reset:
	usb_queue_reset_device(i2400mu->usb_iface);
	rx_skb = ERR_PTR(result);
	goto out;
}
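
/*
 * Sketch of the calling convention for i2400mu_rx() (illustrative
 * only; i2400mu_rxd() below is the real caller). Because the
 * function may reallocate the skb, the returned pointer must always
 * replace the old one, and both the ERR_PTR() and the NULL / empty
 * cases have to be handled:
 *
 *	rx_skb = i2400mu_rx(i2400mu, rx_skb);
 *	result = PTR_ERR(rx_skb);
 *	if (IS_ERR(rx_skb))
 *		goto out;		// hard error; reset already queued
 *	if (rx_skb == NULL || rx_skb->len == 0) {
 *		kfree_skb(rx_skb);	// kfree_skb(NULL) is a no-op
 *		continue;		// transient; retry on the next kick
 *	}
 *	... hand rx_skb to i2400m_rx() ...
 */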


/*
 * Kernel thread for USB reception of data
 *
 * This thread waits for a kick; once kicked, it will allocate an skb
 * and receive a single message to it from USB (using
 * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
 * code for processing.
 *
 * When done processing, it runs some dirty statistics to verify
 * whether the average size of the last 100 messages received was
 * smaller than half of the current RX buffer size. In that case, the
 * RX buffer size is halved. This helps lower the pressure on the
 * memory allocator.
 *
 * Hard errors force the thread to exit.
 */
static
int i2400mu_rxd(void *_i2400mu)
{
	int result = 0;
	struct i2400mu *i2400mu = _i2400mu;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	size_t pending;
	int rx_size;
	struct sk_buff *rx_skb;
	unsigned long flags;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	BUG_ON(i2400mu->rx_kthread != NULL);
	i2400mu->rx_kthread = current;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	while (1) {
		d_printf(2, dev, "RX: waiting for messages\n");
		pending = 0;
		wait_event_interruptible(
			i2400mu->rx_wq,
			(kthread_should_stop()	/* check this first! */
			 || (pending = atomic_read(&i2400mu->rx_pending_count)))
			);
		if (kthread_should_stop())
			break;
		if (pending == 0)
			continue;
		rx_size = i2400mu->rx_size;
		d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
		rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
		if (rx_skb == NULL) {
			dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
				rx_size);
			msleep(50);	/* give it some time? */
			continue;
		}

		/* Receive the message with the payloads */
		rx_skb = i2400mu_rx(i2400mu, rx_skb);
		result = PTR_ERR(rx_skb);
		if (IS_ERR(rx_skb))
			goto out;
		atomic_dec(&i2400mu->rx_pending_count);
		if (rx_skb == NULL || rx_skb->len == 0) {
			/* some "ignorable" condition */
			kfree_skb(rx_skb);
			continue;
		}

		/* Deliver the message to the generic i2400m code */
		i2400mu->rx_size_cnt++;
		i2400mu->rx_size_acc += rx_skb->len;
		result = i2400m_rx(i2400m, rx_skb);
		if (result == -EIO
		    && edc_inc(&i2400mu->urb_edc,
			       EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			goto error_reset;
		}

		/* Maybe adjust RX buffer size */
		i2400mu_rx_size_maybe_shrink(i2400mu);
	}
	result = 0;
out:
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
	return result;

error_reset:
	dev_err(dev, "RX: maximum errors in received buffer exceeded; "
		"resetting device\n");
	usb_queue_reset_device(i2400mu->usb_iface);
	goto out;
}


/*
 * Start reading from the device
 *
 * @i2400mu: device instance
 *
 * Notify the RX thread that there is data pending.
 */
void i2400mu_rx_kick(struct i2400mu *i2400mu)
{
	struct device *dev = &i2400mu->usb_iface->dev;

	d_fnstart(3, dev, "(i2400mu %p)\n", i2400mu);
	atomic_inc(&i2400mu->rx_pending_count);
	wake_up_all(&i2400mu->rx_wq);
	d_fnend(3, dev, "(i2400mu %p) = void\n", i2400mu);
}
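
/*
 * The kick pairs with the wait in i2400mu_rxd(); a minimal sketch of
 * the handshake (illustrative, the real code is above):
 *
 *	producer (notif.c via this)	consumer (i2400mu_rxd())
 *	---------------------------	------------------------
 *	atomic_inc(&rx_pending_count)	wait_event_interruptible(rx_wq,
 *	wake_up_all(&rx_wq)		    kthread_should_stop() ||
 *					    (pending = atomic_read(
 *						&rx_pending_count)))
 *
 * Using a counter instead of a flag means kicks that arrive while a
 * read is in flight are not lost: each completed reception does an
 * atomic_dec() and the thread loops until the count drains.
 */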


int i2400mu_rx_setup(struct i2400mu *i2400mu)
{
	int result = 0;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	struct task_struct *kthread;

	kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
			      wimax_dev->name);
	/* the kthread function sets i2400mu->rx_kthread */
	if (IS_ERR(kthread)) {
		result = PTR_ERR(kthread);
		dev_err(dev, "RX: cannot start thread: %d\n", result);
	}
	return result;
}


void i2400mu_rx_release(struct i2400mu *i2400mu)
{
	unsigned long flags;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = i2400m_dev(i2400m);
	struct task_struct *kthread;

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	kthread = i2400mu->rx_kthread;
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	if (kthread)
		kthread_stop(kthread);
	else
		d_printf(1, dev, "RX: kthread had already exited\n");
}
