Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * USB Network driver infrastructure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (C) 2000-2005 by David Brownell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * This is a generic "USB networking" framework that works with several
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * kinds of full and high speed networking devices:  host-to-host cables,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * smart usb peripherals, and actual Ethernet adapters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * These devices usually differ in terms of control protocols (if they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * even have one!) and sometimes they define new framing to wrap or batch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * Ethernet packets.  Otherwise, they talk to USB pretty much the same,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * so interface (un)binding, endpoint I/O queues, fault handling, and other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * issues can usefully be addressed by this framework.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) // #define	DEBUG			// error path messages, extra info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) // #define	VERBOSE			// more; success messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/ctype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/ethtool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/mii.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/usb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/usb/usbnet.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41)  * Several dozen bytes of IPv4 data can fit in two such transactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  * One maximum size Ethernet packet takes twenty four of them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43)  * For high speed, each frame comfortably fits almost 36 max size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44)  * Ethernet packets (so queues should be bigger).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  * The goal is to let the USB host controller be busy for 5msec or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47)  * more before an irq is required, under load.  Jumbograms change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48)  * the equation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49)  */
/* ~60 maximum-size Ethernet frames of queue memory; keeps the host
 * controller busy for roughly 5 msec at high speed (see comment above).
 */
#define	MAX_QUEUE_MEMORY	(60 * 1518)
/* per-device rx/tx queue depths, computed in usbnet_update_max_qlen() */
#define	RX_QLEN(dev)		((dev)->rx_qlen)
#define	TX_QLEN(dev)		((dev)->tx_qlen)

// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES	(5*HZ)

/* throttle rx/tx briefly after some faults, so hub_wq might disconnect()
 * us (it polls at HZ/4 usually) before we report too many false errors.
 */
#define THROTTLE_JIFFIES	(HZ/8)

// between wakeups (msec) while waiting for pending urbs to unlink
#define UNLINK_TIMEOUT_MS	3

/*-------------------------------------------------------------------------*/

// randomly generated ethernet address
static u8	node_id [ETH_ALEN];

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
MODULE_PARM_DESC (msg_level, "Override default message level");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) /* handles CDC Ethernet and many other network "bulk data" interfaces */
/* handles CDC Ethernet and many other network "bulk data" interfaces */
/*
 * usbnet_get_endpoints - choose bulk-in/bulk-out (and optional status)
 * endpoints for @dev from the altsettings of @intf.
 *
 * Scans altsettings in order and takes the first one that provides both
 * a bulk-IN and a bulk-OUT endpoint; an interrupt-IN endpoint seen along
 * the way is remembered as a possible status endpoint.  On success the
 * selected altsetting is installed (skipped only when it is already
 * altsetting 0 and the minidriver set FLAG_NO_SETINT), and dev->in,
 * dev->out and dev->status are filled in.
 *
 * Returns 0 on success, -EINVAL when no suitable altsetting exists, or
 * the error from usb_set_interface().
 */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt = NULL;
	struct usb_host_endpoint	*in = NULL, *out = NULL;
	struct usb_host_endpoint	*status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * remember any status endpoint, just in case;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;
			int				intr = 0;

			e = alt->endpoint + ep;

			/* ignore endpoints which cannot transfer data */
			if (!usb_endpoint_maxp(&e->desc))
				continue;

			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN-direction interrupt endpoints
				 * are useful (as status endpoints)
				 */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				fallthrough;
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			/* keep the first endpoint found of each role */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	if (alt->desc.bAlternateSetting != 0 ||
	    !(dev->driver_info->flags & FLAG_NO_SETINT)) {
		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	dev->in = usb_rcvbulkpipe (dev->udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out = usb_sndbulkpipe (dev->udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->status = status;
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	int 		tmp = -1, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	unsigned char	buf [13];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	if (ret == 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 		tmp = hex2bin(dev->net->dev_addr, buf, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	if (tmp < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 		dev_dbg(&dev->udev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 		if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) static void intr_complete (struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	struct usbnet	*dev = urb->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 	int		status = urb->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	/* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 		dev->driver_info->status(dev, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	/* software-driven interface shutdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	case -ENOENT:		/* urb killed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	case -ESHUTDOWN:	/* hardware gone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 		netif_dbg(dev, ifdown, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 			  "intr shutdown, code %d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	/* NOTE:  not throttling like RX/TX, since this endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	 * already polls infrequently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 		netdev_dbg(dev->net, "intr status %d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	status = usb_submit_urb (urb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 		netif_err(dev, timer, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 			  "intr resubmit --> %d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) static int init_status (struct usbnet *dev, struct usb_interface *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	char		*buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	unsigned	pipe = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	unsigned	maxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	unsigned	period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	if (!dev->driver_info->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	pipe = usb_rcvintpipe (dev->udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 			dev->status->desc.bEndpointAddress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 				& USB_ENDPOINT_NUMBER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	maxp = usb_maxpacket (dev->udev, pipe, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	/* avoid 1 msec chatter:  min 8 msec poll rate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	period = max ((int) dev->status->desc.bInterval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	buf = kmalloc (maxp, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	if (buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 		if (!dev->interrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 			kfree (buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 				buf, maxp, intr_complete, dev, period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 			dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 			dev_dbg(&intf->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 				"status ep%din, %d bytes period %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 				usb_pipeendpoint(pipe), maxp, period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) /* Submit the interrupt URB if not previously submitted, increasing refcount */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	WARN_ON_ONCE(dev->interrupt == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	if (dev->interrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 		mutex_lock(&dev->interrupt_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 		if (++dev->interrupt_count == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 			ret = usb_submit_urb(dev->interrupt, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 		dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 			dev->interrupt_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 		mutex_unlock(&dev->interrupt_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) EXPORT_SYMBOL_GPL(usbnet_status_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) /* For resume; submit interrupt URB if previously submitted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	mutex_lock(&dev->interrupt_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	if (dev->interrupt_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 		ret = usb_submit_urb(dev->interrupt, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		dev_dbg(&dev->udev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 			"submitted interrupt URB for resume\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	mutex_unlock(&dev->interrupt_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) /* Kill the interrupt URB if all submitters want it killed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) void usbnet_status_stop(struct usbnet *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	if (dev->interrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 		mutex_lock(&dev->interrupt_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 		WARN_ON(dev->interrupt_count == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 		if (dev->interrupt_count && --dev->interrupt_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 			usb_kill_urb(dev->interrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 		dev_dbg(&dev->udev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 			"decremented interrupt URB count to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 			dev->interrupt_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 		mutex_unlock(&dev->interrupt_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) EXPORT_SYMBOL_GPL(usbnet_status_stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) /* For suspend; always kill interrupt URB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) static void __usbnet_status_stop_force(struct usbnet *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	if (dev->interrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 		mutex_lock(&dev->interrupt_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 		usb_kill_urb(dev->interrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 		dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 		mutex_unlock(&dev->interrupt_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) /* Passes this packet up the stack, updating its accounting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302)  * Some link protocols batch packets, so their rx_fixup paths
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303)  * can return clones as well as just modify the original skb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304)  */
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
	unsigned long flags;
	int	status;

	/* while rx is paused, park packets on rxq_pause instead of
	 * delivering them; usbnet_resume_rx() replays the queue later
	 */
	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	/* only update if unset to allow minidriver rx_fixup override */
	if (skb->protocol == 0)
		skb->protocol = eth_type_trans (skb, dev->net);

	/* per-cpu 64-bit rx counters, guarded by the u64_stats syncp */
	flags = u64_stats_update_begin_irqsave(&stats64->syncp);
	stats64->rx_packets++;
	stats64->rx_bytes += skb->len;
	u64_stats_update_end_irqrestore(&stats64->syncp, flags);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	/* wipe the skb_data state before the stack reuses skb->cb */
	memset (skb->cb, 0, sizeof (struct skb_data));

	/* if a hardware rx timestamp is pending, delivery is deferred */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) /* must be called if hard_mtu or rx_urb_size changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) void usbnet_update_max_qlen(struct usbnet *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	enum usb_device_speed speed = dev->udev->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	if (!dev->rx_urb_size || !dev->hard_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 		goto insanity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	switch (speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	case USB_SPEED_HIGH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 		dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 		dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	case USB_SPEED_SUPER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	case USB_SPEED_SUPER_PLUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 		 * Not take default 5ms qlen for super speed HC to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 		 * save memory, and iperf tests show 2.5ms qlen can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 		 * work well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 		dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 		dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) insanity:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 		dev->rx_qlen = dev->tx_qlen = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) /*-------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371)  * Network Device Driver (peer link to "Host Device", from USB host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373)  *-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
/*
 * usbnet_change_mtu - ndo_change_mtu hook.
 *
 * Rejects MTUs whose link-layer frame size is an exact multiple of the
 * bulk endpoint's max packet size (that would require an extra
 * zero-length packet per frame), then updates hard_mtu and, when the rx
 * buffer size was tracking the old hard_mtu, grows it and flushes the
 * in-flight rx urbs so new ones are allocated at the new size.
 *
 * Returns 0 on success or -EDOM for a disallowed MTU.
 */
int usbnet_change_mtu (struct net_device *net, int new_mtu)
{
	struct usbnet	*dev = netdev_priv(net);
	int		ll_mtu = new_mtu + net->hard_header_len;
	int		old_hard_mtu = dev->hard_mtu;
	int		old_rx_urb_size = dev->rx_urb_size;

	// no second zero-length packet read wanted after mtu-sized packets
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;
	net->mtu = new_mtu;

	dev->hard_mtu = net->mtu + net->hard_header_len;
	/* only resize the rx buffer if it was following hard_mtu;
	 * minidrivers that chose their own rx_urb_size keep it
	 */
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			/* drop queued rx urbs so they get reallocated
			 * at the larger size
			 */
			usbnet_pause_rx(dev);
			usbnet_unlink_rx_urbs(dev);
			usbnet_resume_rx(dev);
		}
	}

	/* max qlen depend on hard_mtu and rx_urb_size */
	usbnet_update_max_qlen(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) /* The caller must hold list->lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) static void __usbnet_queue_skb(struct sk_buff_head *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 			struct sk_buff *newsk, enum skb_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	struct skb_data *entry = (struct skb_data *) newsk->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	__skb_queue_tail(list, newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	entry->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417)  * completion callbacks.  2.5 should have fixed those bugs...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 
/*
 * defer_bh - move @skb from @list to dev->done and kick the tasklet.
 *
 * Marks the skb's control block with the new @state, unlinks it from
 * the rx/tx queue it sits on, appends it to the done list, and schedules
 * the driver tasklet when the done list transitions from empty.
 * Returns the skb's previous state so callers can detect cancellation.
 */
static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
		struct sk_buff_head *list, enum skb_state state)
{
	unsigned long		flags;
	enum skb_state 		old_state;
	struct skb_data *entry = (struct skb_data *) skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;
	__skb_unlink(skb, list);

	/* defer_bh() is never called with list == &dev->done.
	 * spin_lock_nested() tells lockdep that it is OK to take
	 * dev->done.lock here with list->lock held.
	 */
	spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);

	__skb_queue_tail(&dev->done, skb);
	/* schedule only on the empty->nonempty transition; the tasklet
	 * drains the whole done list in one pass
	 */
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock(&dev->done.lock);
	spin_unlock_irqrestore(&list->lock, flags);
	return old_state;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) /* some work can't be done in tasklets, so we use keventd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448)  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449)  * but tasklet_schedule() doesn't.  hope the failure is rare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) void usbnet_defer_kevent (struct usbnet *dev, int work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	set_bit (work, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	if (!schedule_work (&dev->kevent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		netdev_dbg(dev->net, "kevent %d scheduled\n", work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) static void rx_complete (struct urb *urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	struct sk_buff		*skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	struct skb_data		*entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	int			retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	unsigned long		lockflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	size_t			size = dev->rx_urb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	/* prevent rx skb allocation when error ratio is high */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	if (test_bit(EVENT_RX_KILL, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		usb_free_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		return -ENOLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		skb = __netdev_alloc_skb(dev->net, size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 		usb_free_urb (urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	entry = (struct skb_data *) skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	entry->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	entry->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	entry->length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	usb_fill_bulk_urb (urb, dev->udev, dev->in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		skb->data, size, rx_complete, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	spin_lock_irqsave (&dev->rxq.lock, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	if (netif_running (dev->net) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	    netif_device_present (dev->net) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	    test_bit(EVENT_DEV_OPEN, &dev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 			usbnet_defer_kevent (dev, EVENT_RX_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 		case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		case -ENODEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 			netif_device_detach (dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 		case -EHOSTUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 			retval = -ENOLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 			netif_dbg(dev, rx_err, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 				  "rx submit, %d\n", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 			tasklet_schedule (&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		retval = -ENOLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		dev_kfree_skb_any (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		usb_free_urb (urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	if (dev->driver_info->rx_fixup &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	    !dev->driver_info->rx_fixup (dev, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		/* With RX_ASSEMBLE, rx_fixup() must update counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 			dev->net->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	// else network stack removes extra byte if we forced a short packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	/* all data was already cloned from skb inside the driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	if (dev->driver_info->flags & FLAG_MULTI_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	if (skb->len < ETH_HLEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		dev->net->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		dev->net->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 		usbnet_skb_return(dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	skb_queue_tail(&dev->done, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) static void rx_complete (struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	struct sk_buff		*skb = (struct sk_buff *) urb->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	struct skb_data		*entry = (struct skb_data *) skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	struct usbnet		*dev = entry->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	int			urb_status = urb->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	enum skb_state		state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	skb_put (skb, urb->actual_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	state = rx_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	entry->urb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	switch (urb_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	/* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	/* stalls need manual reset. this is rare ... except that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	 * when going through USB 2.0 TTs, unplug appears this way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	 * storm, recovering as needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		dev->net->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		usbnet_defer_kevent (dev, EVENT_RX_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	/* software-driven interface shutdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	case -ECONNRESET:		/* async unlink */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	case -ESHUTDOWN:		/* hardware gone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		netif_dbg(dev, ifdown, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 			  "rx shutdown, code %d\n", urb_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		goto block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	/* we get controller i/o faults during hub_wq disconnect() delays.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	 * throttle down resubmits, to avoid log floods; just temporarily,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	 * so we still recover when the fault isn't a hub_wq delay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	case -EPROTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	case -ETIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	case -EILSEQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		dev->net->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		if (!timer_pending (&dev->delay)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 			netif_dbg(dev, link, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 				  "rx throttle %d\n", urb_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		state = rx_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		entry->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		urb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	/* data overrun ... flush fifo? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	case -EOVERFLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		dev->net->stats.rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		state = rx_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		dev->net->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	/* stop rx if packet error rate is high */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	if (++dev->pkt_cnt > 30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		dev->pkt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		dev->pkt_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		if (state == rx_cleanup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 			dev->pkt_err++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		if (dev->pkt_err > 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			set_bit(EVENT_RX_KILL, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	state = defer_bh(dev, skb, &dev->rxq, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	if (urb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		if (netif_running (dev->net) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		    state != unlink_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 			rx_submit (dev, urb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 			usb_mark_last_busy(dev->udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		usb_free_urb (urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) void usbnet_pause_rx(struct usbnet *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	set_bit(EVENT_RX_PAUSED, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) EXPORT_SYMBOL_GPL(usbnet_pause_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) void usbnet_resume_rx(struct usbnet *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	int num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	clear_bit(EVENT_RX_PAUSED, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		usbnet_skb_return(dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	netif_dbg(dev, rx_status, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		  "paused rx queue disabled, %d skbs requeued\n", num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) EXPORT_SYMBOL_GPL(usbnet_resume_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) void usbnet_purge_paused_rxq(struct usbnet *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	skb_queue_purge(&dev->rxq_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) // unlink pending rx/tx; completion handlers do all other cleanup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	struct sk_buff		*skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	int			count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	spin_lock_irqsave (&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	while (!skb_queue_empty(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		struct skb_data		*entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		struct urb		*urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		int			retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		skb_queue_walk(q, skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			entry = (struct skb_data *) skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			if (entry->state != unlink_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 				goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		entry->state = unlink_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		urb = entry->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		 * Get reference count of the URB to avoid it to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		 * freed during usb_unlink_urb, which may trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		 * use-after-free problem inside usb_unlink_urb since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		 * usb_unlink_urb is always racing with .complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		 * handler(include defer_bh).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		usb_get_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		spin_unlock_irqrestore(&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		// during some PM-driven resume scenarios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		// these (async) unlinks complete immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		retval = usb_unlink_urb (urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		if (retval != -EINPROGRESS && retval != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		usb_put_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		spin_lock_irqsave(&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	spin_unlock_irqrestore (&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) // Flush all pending rx urbs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) // minidrivers may need to do this when the MTU changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) void usbnet_unlink_rx_urbs(struct usbnet *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	if (netif_running(dev->net)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		(void) unlink_urbs (dev, &dev->rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) static void wait_skb_queue_empty(struct sk_buff_head *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	spin_lock_irqsave(&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	while (!skb_queue_empty(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		spin_unlock_irqrestore(&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		spin_lock_irqsave(&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	spin_unlock_irqrestore(&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) // precondition: never called in_interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) static void usbnet_terminate_urbs(struct usbnet *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	DECLARE_WAITQUEUE(wait, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	int temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	/* ensure there are no more active urbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	add_wait_queue(&dev->wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	temp = unlink_urbs(dev, &dev->txq) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		unlink_urbs(dev, &dev->rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	/* maybe wait for deletions to finish. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	wait_skb_queue_empty(&dev->rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	wait_skb_queue_empty(&dev->txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	wait_skb_queue_empty(&dev->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	netif_dbg(dev, ifdown, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		  "waited for %d urb completions\n", temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	remove_wait_queue(&dev->wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) int usbnet_stop (struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	struct usbnet		*dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	const struct driver_info *info = dev->driver_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	int			retval, pm, mpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	netif_stop_queue (net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	netif_info(dev, ifdown, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		   net->stats.rx_packets, net->stats.tx_packets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		   net->stats.rx_errors, net->stats.tx_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	/* to not race resume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	pm = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	/* allow minidriver to stop correctly (wireless devices to turn off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	 * radio etc) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	if (info->stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		retval = info->stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			netif_info(dev, ifdown, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 				   "stop fail (%d) usbnet usb-%s-%s, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 				   retval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 				   dev->udev->bus->bus_name, dev->udev->devpath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 				   info->description);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		usbnet_terminate_urbs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	usbnet_status_stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	usbnet_purge_paused_rxq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	/* deferred work (task, timer, softirq) must also stop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	 * can't flush_scheduled_work() until we drop rtnl (later),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	 * else workers could deadlock; so make workers a NOP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	dev->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	del_timer_sync (&dev->delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	tasklet_kill (&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (!pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	if (info->manage_power && mpn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		info->manage_power(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) EXPORT_SYMBOL_GPL(usbnet_stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) // posts reads, and enables write queuing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) // precondition: never called in_interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) int usbnet_open (struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	struct usbnet		*dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	int			retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	const struct driver_info *info = dev->driver_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		netif_info(dev, ifup, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			   "resumption fail (%d) usbnet usb-%s-%s, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			   retval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 			   dev->udev->bus->bus_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			   dev->udev->devpath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			   info->description);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		goto done_nopm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	// put into "known safe" state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	if (info->reset && (retval = info->reset (dev)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		netif_info(dev, ifup, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			   retval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			   dev->udev->bus->bus_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			   dev->udev->devpath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			   info->description);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	/* hard_mtu or rx_urb_size may change in reset() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	usbnet_update_max_qlen(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	// insist peer be connected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	/* start any status interrupt transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	if (dev->interrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		retval = usbnet_status_start(dev, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			netif_err(dev, ifup, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 				  "intr submit %d\n", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	set_bit(EVENT_DEV_OPEN, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	netif_start_queue (net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	netif_info(dev, ifup, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		   dev->net->mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		   "simple");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	/* reset rx error state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	dev->pkt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	dev->pkt_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	clear_bit(EVENT_RX_KILL, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	// delay posting reads until we're fully open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	tasklet_schedule (&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	if (info->manage_power) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		retval = info->manage_power(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 			retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) done_nopm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) EXPORT_SYMBOL_GPL(usbnet_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) /* ethtool methods; minidrivers may need to add some more, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  * they'll probably want to use this base set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) int usbnet_get_link_ksettings(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			      struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	struct usbnet *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	if (!dev->mii.mdio_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	mii_ethtool_get_link_ksettings(&dev->mii, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) int usbnet_set_link_ksettings(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			      const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	struct usbnet *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	if (!dev->mii.mdio_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	retval = mii_ethtool_set_link_ksettings(&dev->mii, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	/* link speed/duplex might have changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	if (dev->driver_info->link_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		dev->driver_info->link_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	/* hard_mtu or rx_urb_size may change in link_reset() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	usbnet_update_max_qlen(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) void usbnet_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	struct usbnet *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	netdev_stats_to_stats64(stats, &net->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	dev_fetch_sw_netstats(stats, dev->stats64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) EXPORT_SYMBOL_GPL(usbnet_get_stats64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) u32 usbnet_get_link (struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	struct usbnet *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	/* If a check_connect is defined, return its result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (dev->driver_info->check_connect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		return dev->driver_info->check_connect (dev) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	/* if the device has mii operations, use those */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	if (dev->mii.mdio_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		return mii_link_ok(&dev->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	/* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	return ethtool_op_get_link(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) EXPORT_SYMBOL_GPL(usbnet_get_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) int usbnet_nway_reset(struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	struct usbnet *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	if (!dev->mii.mdio_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	return mii_nway_restart(&dev->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) EXPORT_SYMBOL_GPL(usbnet_nway_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	struct usbnet *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	strlcpy (info->driver, dev->driver_name, sizeof info->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	strlcpy (info->fw_version, dev->driver_info->description,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		sizeof info->fw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) u32 usbnet_get_msglevel (struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	struct usbnet *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	return dev->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) void usbnet_set_msglevel (struct net_device *net, u32 level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct usbnet *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	dev->msg_enable = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* drivers may override default ethtool_ops in their bind() routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static const struct ethtool_ops usbnet_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	.get_link		= usbnet_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	.nway_reset		= usbnet_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	.get_drvinfo		= usbnet_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	.get_msglevel		= usbnet_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	.set_msglevel		= usbnet_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	.get_ts_info		= ethtool_op_get_ts_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	.get_link_ksettings	= usbnet_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	.set_link_ksettings	= usbnet_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static void __handle_link_change(struct usbnet *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	if (!test_bit(EVENT_DEV_OPEN, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	if (!netif_carrier_ok(dev->net)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		/* kill URBs for reading packets to save bus bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		unlink_urbs(dev, &dev->rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		 * tx_timeout will unlink URBs for sending packets and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		 * tx queue is stopped by netcore after link becomes off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		/* submitting URBs for reading packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	/* hard_mtu or rx_urb_size may change during link change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	usbnet_update_max_qlen(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	clear_bit(EVENT_LINK_CHANGE, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) void usbnet_set_rx_mode(struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	struct usbnet		*dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) EXPORT_SYMBOL_GPL(usbnet_set_rx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static void __handle_set_rx_mode(struct usbnet *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (dev->driver_info->set_rx_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		(dev->driver_info->set_rx_mode)(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	clear_bit(EVENT_SET_RX_MODE, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /* work that cannot be done in interrupt context uses keventd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  * NOTE:  with 2.5 we could do more of this using completion callbacks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  * especially now that control transfers can be queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) usbnet_deferred_kevent (struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	struct usbnet		*dev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		container_of(work, struct usbnet, kevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	int			status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	/* usb_clear_halt() needs a thread context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		unlink_urbs (dev, &dev->txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		status = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			goto fail_pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		status = usb_clear_halt (dev->udev, dev->out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		if (status < 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		    status != -EPIPE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		    status != -ESHUTDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			if (netif_msg_tx_err (dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) fail_pipe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 				netdev_err(dev->net, "can't clear tx halt, status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 					   status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			clear_bit (EVENT_TX_HALT, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 			if (status != -ESHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 				netif_wake_queue (dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		unlink_urbs (dev, &dev->rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		status = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			goto fail_halt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		status = usb_clear_halt (dev->udev, dev->in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		if (status < 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		    status != -EPIPE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		    status != -ESHUTDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			if (netif_msg_rx_err (dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) fail_halt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 				netdev_err(dev->net, "can't clear rx halt, status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 					   status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			clear_bit (EVENT_RX_HALT, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			tasklet_schedule (&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	/* tasklet could resubmit itself forever if memory is tight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		struct urb	*urb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		int resched = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		if (netif_running (dev->net))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			urb = usb_alloc_urb (0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		if (urb != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			status = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 				usb_free_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 				goto fail_lowmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 				resched = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) fail_lowmem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 			if (resched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 				tasklet_schedule (&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		const struct driver_info *info = dev->driver_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		int			retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		clear_bit (EVENT_LINK_RESET, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		status = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			goto skip_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) skip_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 				    retval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 				    dev->udev->bus->bus_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 				    dev->udev->devpath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 				    info->description);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		/* handle link change from link resetting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		__handle_link_change(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		__handle_link_change(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		__handle_set_rx_mode(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	if (dev->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static void tx_complete (struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct sk_buff		*skb = (struct sk_buff *) urb->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	struct skb_data		*entry = (struct skb_data *) skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	struct usbnet		*dev = entry->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (urb->status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		flags = u64_stats_update_begin_irqsave(&stats64->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		stats64->tx_packets += entry->packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		stats64->tx_bytes += entry->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		u64_stats_update_end_irqrestore(&stats64->syncp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		dev->net->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		switch (urb->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 			usbnet_defer_kevent (dev, EVENT_TX_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		/* software-driven interface shutdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		case -ECONNRESET:		// async unlink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		case -ESHUTDOWN:		// hardware gone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		/* like rx, tx gets controller i/o faults during hub_wq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		 * delays and so it uses the same throttling mechanism.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		case -EPROTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		case -ETIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		case -EILSEQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			usb_mark_last_busy(dev->udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			if (!timer_pending (&dev->delay)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 				mod_timer (&dev->delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 					jiffies + THROTTLE_JIFFIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 				netif_dbg(dev, link, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 					  "tx throttle %d\n", urb->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			netif_stop_queue (dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 			netif_dbg(dev, tx_err, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 				  "tx err %d\n", entry->urb->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	usb_autopm_put_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	(void) defer_bh(dev, skb, &dev->txq, tx_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	struct usbnet		*dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	unlink_urbs (dev, &dev->txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	tasklet_schedule (&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	/* this needs to be handled individually because the generic layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	 * doesn't know what is sufficient and could not restore private
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	 * information if a remedy of an unconditional reset were used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	if (dev->driver_info->recover)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		(dev->driver_info->recover)(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	unsigned num_sgs, total_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	int i, s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	num_sgs = skb_shinfo(skb)->nr_frags + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	if (num_sgs == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	/* reserve one for zero packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	urb->sg = kmalloc_array(num_sgs + 1, sizeof(struct scatterlist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 				GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (!urb->sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	urb->num_sgs = num_sgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	sg_init_table(urb->sg, urb->num_sgs + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	total_len += skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		total_len += skb_frag_size(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			    skb_frag_off(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	urb->transfer_buffer_length = total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 				     struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	struct usbnet		*dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	unsigned int			length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	struct urb		*urb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	struct skb_data		*entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	const struct driver_info *info = dev->driver_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		skb_tx_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	// some devices want funky USB-level framing, for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	// win32 driver (usually) and/or hardware quirks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	if (info->tx_fixup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 			/* packet collected; minidriver waiting for more */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 			if (info->flags & FLAG_MULTI_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 				goto not_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		netif_dbg(dev, tx_err, dev->net, "no urb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	entry = (struct skb_data *) skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	entry->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	entry->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	usb_fill_bulk_urb (urb, dev->udev, dev->out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			skb->data, skb->len, tx_complete, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	if (dev->can_dma_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		if (build_dma_sg(skb, urb) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	length = urb->transfer_buffer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	/* don't assume the hardware handles USB_ZERO_PACKET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	 * NOTE:  strictly conforming cdc-ether devices should expect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	 * the ZLP here, but ignore the one-byte packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	 * NOTE2: CDC NCM specification is different from CDC ECM when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	 * handling ZLP/short packets, so cdc_ncm driver will make short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	 * packet itself if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	if (length % dev->maxpacket == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		if (!(info->flags & FLAG_SEND_ZLP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			if (!(info->flags & FLAG_MULTI_PACKET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 				length++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 				if (skb_tailroom(skb) && !urb->num_sgs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 					skb->data[skb->len] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 					__skb_put(skb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 				} else if (urb->num_sgs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 					sg_set_buf(&urb->sg[urb->num_sgs++],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 							dev->padding_pkt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			urb->transfer_flags |= URB_ZERO_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	urb->transfer_buffer_length = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	if (info->flags & FLAG_MULTI_PACKET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		/* Driver has set number of packets and a length delta.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		 * Calculate the complete length and ensure that it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		 * positive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		entry->length += length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		if (WARN_ON_ONCE(entry->length <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			entry->length = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		usbnet_set_skb_tx_stats(skb, 1, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	spin_lock_irqsave(&dev->txq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	retval = usb_autopm_get_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		spin_unlock_irqrestore(&dev->txq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	if (netif_queue_stopped(net)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		usb_autopm_put_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		spin_unlock_irqrestore(&dev->txq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	/* if this triggers the device is still a sleep */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		/* transmission will be done in resume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		usb_anchor_urb(urb, &dev->deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		/* no use to process more packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		netif_stop_queue(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		usb_put_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		spin_unlock_irqrestore(&dev->txq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		goto deferred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		netif_stop_queue (net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		usbnet_defer_kevent (dev, EVENT_TX_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		usb_autopm_put_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		usb_autopm_put_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		netif_dbg(dev, tx_err, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 			  "tx: submit urb err %d\n", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		netif_trans_update(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		__usbnet_queue_skb(&dev->txq, skb, tx_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		if (dev->txq.qlen >= TX_QLEN (dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			netif_stop_queue (net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	spin_unlock_irqrestore (&dev->txq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		dev->net->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) not_drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			dev_kfree_skb_any (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		if (urb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 			kfree(urb->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 			usb_free_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		netif_dbg(dev, tx_queued, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 			  "> tx, len %u, type 0x%x\n", length, skb->protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) deferred:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) EXPORT_SYMBOL_GPL(usbnet_start_xmit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	struct urb	*urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	int		i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	int		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	/* don't refill the queue all at once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		urb = usb_alloc_urb(0, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		if (urb != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			ret = rx_submit(dev, urb, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 				goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) // tasklet (work deferred from completions, in_irq) or timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) static void usbnet_bh (struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	struct usbnet		*dev = from_timer(dev, t, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	struct sk_buff		*skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	struct skb_data		*entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	while ((skb = skb_dequeue (&dev->done))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		entry = (struct skb_data *) skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		switch (entry->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		case rx_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			entry->state = rx_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 			rx_process (dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		case tx_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			kfree(entry->urb->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		case rx_cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 			usb_free_urb (entry->urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 			dev_kfree_skb (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	/* restart RX again after disabling due to high error rate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	clear_bit(EVENT_RX_KILL, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	/* waiting for all pending urbs to complete?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	 * only then can we forgo submitting anew
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	if (waitqueue_active(&dev->wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 			wake_up_all(&dev->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	// or are we maybe short a few urbs?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	} else if (netif_running (dev->net) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		   netif_device_present (dev->net) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		   netif_carrier_ok(dev->net) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		   !timer_pending(&dev->delay) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		   !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		   !test_bit(EVENT_RX_HALT, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		int	temp = dev->rxq.qlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		if (temp < RX_QLEN(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 			if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 			if (temp != dev->rxq.qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 				netif_dbg(dev, link, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 					  "rxqlen %d --> %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 					  temp, dev->rxq.qlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 			if (dev->rxq.qlen < RX_QLEN(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 				tasklet_schedule (&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		if (dev->txq.qlen < TX_QLEN (dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 			netif_wake_queue (dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) static void usbnet_bh_tasklet(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	struct timer_list *t = (struct timer_list *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	usbnet_bh(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /*-------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  * USB Device Driver support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  *-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) // precondition: never called in_interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) void usbnet_disconnect (struct usb_interface *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	struct usbnet		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	struct usb_device	*xdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	struct net_device	*net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	dev = usb_get_intfdata(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	usb_set_intfdata(intf, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	xdev = interface_to_usbdev (intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		   intf->dev.driver->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		   xdev->bus->bus_name, xdev->devpath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		   dev->driver_info->description);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	net = dev->net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	unregister_netdev (net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	cancel_work_sync(&dev->kevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	usb_scuttle_anchored_urbs(&dev->deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	if (dev->driver_info->unbind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		dev->driver_info->unbind (dev, intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	usb_kill_urb(dev->interrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	usb_free_urb(dev->interrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	kfree(dev->padding_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	free_percpu(dev->stats64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	free_netdev(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) EXPORT_SYMBOL_GPL(usbnet_disconnect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) static const struct net_device_ops usbnet_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	.ndo_open		= usbnet_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	.ndo_stop		= usbnet_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	.ndo_start_xmit		= usbnet_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	.ndo_tx_timeout		= usbnet_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	.ndo_set_rx_mode	= usbnet_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	.ndo_change_mtu		= usbnet_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	.ndo_get_stats64	= usbnet_get_stats64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	.ndo_set_mac_address 	= eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	.ndo_validate_addr	= eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) // precondition: never called in_interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) static struct device_type wlan_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	.name	= "wlan",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) static struct device_type wwan_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	.name	= "wwan",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	struct usbnet			*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	struct net_device		*net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	struct usb_host_interface	*interface;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	const struct driver_info	*info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	struct usb_device		*xdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	int				status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	const char			*name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	struct usb_driver 	*driver = to_usb_driver(udev->dev.driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	/* usbnet already took usb runtime pm, so have to enable the feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	 * for usb interface, otherwise usb_autopm_get_interface may return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	 * failure if RUNTIME_PM is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	if (!driver->supports_autosuspend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		driver->supports_autosuspend = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		pm_runtime_enable(&udev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	name = udev->dev.driver->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	info = (const struct driver_info *) prod->driver_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	if (!info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	xdev = interface_to_usbdev (udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	interface = udev->cur_altsetting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	// set up our own records
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	net = alloc_etherdev(sizeof(*dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	if (!net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	/* netdev_printk() needs this so do it as early as possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	SET_NETDEV_DEV(net, &udev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	dev->udev = xdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	dev->intf = udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	dev->driver_info = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	dev->driver_name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	dev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (!dev->stats64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		goto out0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	init_waitqueue_head(&dev->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	skb_queue_head_init (&dev->rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	skb_queue_head_init (&dev->txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	skb_queue_head_init (&dev->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	skb_queue_head_init(&dev->rxq_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	dev->bh.func = usbnet_bh_tasklet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	dev->bh.data = (unsigned long)&dev->delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	init_usb_anchor(&dev->deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	timer_setup(&dev->delay, usbnet_bh, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	mutex_init (&dev->phy_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	mutex_init(&dev->interrupt_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	dev->interrupt_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	dev->net = net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	strcpy (net->name, "usb%d");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	memcpy (net->dev_addr, node_id, sizeof node_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	/* rx and tx sides can use different message sizes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	 * bind() should set rx_urb_size in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	dev->hard_mtu = net->mtu + net->hard_header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	net->min_mtu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	net->max_mtu = ETH_MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	net->netdev_ops = &usbnet_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	net->ethtool_ops = &usbnet_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	// allow device-specific bind/init procedures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	// NOTE net->name still not usable ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	if (info->bind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		status = info->bind (dev, udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 			goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		// heuristic:  "usb%d" for links we know are two-host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		// else "eth%d" when there's reasonable doubt.  userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		// can rename the link if it knows better.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		     (net->dev_addr [0] & 0x02) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 			strcpy (net->name, "eth%d");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		/* WLAN devices should always be named "wlan%d" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			strcpy(net->name, "wlan%d");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		/* WWAN devices should always be named "wwan%d" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 			strcpy(net->name, "wwan%d");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		/* devices that cannot do ARP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		if ((dev->driver_info->flags & FLAG_NOARP) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 			net->flags |= IFF_NOARP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		/* maybe the remote can't receive an Ethernet MTU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			net->mtu = dev->hard_mtu - net->hard_header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	} else if (!info->in || !info->out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		status = usbnet_get_endpoints (dev, udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		dev->in = usb_rcvbulkpipe (xdev, info->in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		dev->out = usb_sndbulkpipe (xdev, info->out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		if (!(info->flags & FLAG_NO_SETINT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 			status = usb_set_interface (xdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 				interface->desc.bInterfaceNumber,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 				interface->desc.bAlternateSetting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 			status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	if (status >= 0 && dev->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		status = init_status (dev, udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		goto out3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	if (!dev->rx_urb_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		dev->rx_urb_size = dev->hard_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	if (dev->maxpacket == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		/* that is a broken device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		status = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		goto out4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	/* let userspace know we have a random address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	if (ether_addr_equal(net->dev_addr, node_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		net->addr_assign_type = NET_ADDR_RANDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		SET_NETDEV_DEVTYPE(net, &wlan_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		SET_NETDEV_DEVTYPE(net, &wwan_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	/* initialize max rx_qlen and tx_qlen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	usbnet_update_max_qlen(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		!(info->flags & FLAG_MULTI_PACKET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		dev->padding_pkt = kzalloc(1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		if (!dev->padding_pkt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 			status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 			goto out4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	status = register_netdev (net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		goto out5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	netif_info(dev, probe, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		   "register '%s' at usb-%s-%s, %s, %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		   udev->dev.driver->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		   xdev->bus->bus_name, xdev->devpath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		   dev->driver_info->description,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		   net->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	// ok, it's ready to go.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	usb_set_intfdata (udev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	netif_device_attach (net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	if (dev->driver_info->flags & FLAG_LINK_INTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		usbnet_link_change(dev, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) out5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	kfree(dev->padding_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) out4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	usb_free_urb(dev->interrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) out3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	if (info->unbind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		info->unbind (dev, udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	/* subdrivers must undo all they did in bind() if they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	 * fail it, but we may fail later and a deferred kevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	 * may trigger an error resubmitting itself and, worse,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	 * schedule a timer. So we kill it all just in case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	cancel_work_sync(&dev->kevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	del_timer_sync(&dev->delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	free_percpu(dev->stats64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) out0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	free_netdev(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) EXPORT_SYMBOL_GPL(usbnet_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)  * suspend the whole driver as soon as the first interface is suspended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  * resume only when the last interface is resumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	struct usbnet		*dev = usb_get_intfdata(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	if (!dev->suspend_count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		spin_lock_irq(&dev->txq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		/* don't autosuspend while transmitting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 			dev->suspend_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 			spin_unlock_irq(&dev->txq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			spin_unlock_irq(&dev->txq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		 * accelerate emptying of the rx and queues, to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		 * having everything error out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		netif_device_detach (dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		usbnet_terminate_urbs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		__usbnet_status_stop_force(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		 * reattach so runtime management can use and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		 * wake the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		netif_device_attach (dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) EXPORT_SYMBOL_GPL(usbnet_suspend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) int usbnet_resume (struct usb_interface *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	struct usbnet		*dev = usb_get_intfdata(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	struct sk_buff          *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	struct urb              *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	int                     retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	if (!--dev->suspend_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		/* resume interrupt URB if it was previously submitted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		__usbnet_status_start_force(dev, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		spin_lock_irq(&dev->txq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		while ((res = usb_get_from_anchor(&dev->deferred))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 			skb = (struct sk_buff *)res->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 			retval = usb_submit_urb(res, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 			if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 				dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 				kfree(res->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 				usb_free_urb(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 				usb_autopm_put_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 				netif_trans_update(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 				__skb_queue_tail(&dev->txq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		spin_unlock_irq(&dev->txq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 			/* handle remote wakeup ASAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 			 * we cannot race against stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 			if (netif_device_present(dev->net) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 				!timer_pending(&dev->delay) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 				!test_bit(EVENT_RX_HALT, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 					rx_alloc_submit(dev, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 			if (!(dev->txq.qlen >= TX_QLEN(dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 				netif_tx_wake_all_queues(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 			tasklet_schedule (&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		usb_autopm_get_interface_no_resume(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) EXPORT_SYMBOL_GPL(usbnet_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)  * Either a subdriver implements manage_power, then it is assumed to always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)  * be ready to be suspended or it reports the readiness to be suspended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)  * explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) void usbnet_device_suggests_idle(struct usbnet *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		dev->intf->needs_remote_wakeup = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		usb_autopm_put_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) EXPORT_SYMBOL(usbnet_device_suggests_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)  * For devices that can do without special commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) int usbnet_manage_power(struct usbnet *dev, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	dev->intf->needs_remote_wakeup = on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) EXPORT_SYMBOL(usbnet_manage_power);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	/* update link after link is reseted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (link && !need_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		netif_carrier_on(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		netif_carrier_off(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	if (need_reset && link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		usbnet_defer_kevent(dev, EVENT_LINK_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) EXPORT_SYMBOL(usbnet_link_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 			     u16 value, u16 index, void *data, u16 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	void *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		   " value=0x%04x index=0x%04x size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		   cmd, reqtype, value, index, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		buf = kmalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 			      cmd, reqtype, value, index, buf, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 			      USB_CTRL_GET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	if (err > 0 && err <= size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)         if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)             memcpy(data, buf, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)         else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)             netdev_dbg(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)                 "Huh? Data requested but thrown away.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)     }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 			      u16 value, u16 index, const void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 			      u16 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	void *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		   " value=0x%04x index=0x%04x size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		   cmd, reqtype, value, index, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		buf = kmemdup(data, size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)         if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)             WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)             err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)             goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)         }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)     }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 			      cmd, reqtype, value, index, buf, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 			      USB_CTRL_SET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)  * The function can't be called inside suspend/resume callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)  * otherwise deadlock will be caused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		    u16 value, u16 index, void *data, u16 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	if (usb_autopm_get_interface(dev->intf) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 				data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) EXPORT_SYMBOL_GPL(usbnet_read_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)  * The function can't be called inside suspend/resume callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)  * otherwise deadlock will be caused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		     u16 value, u16 index, const void *data, u16 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	if (usb_autopm_get_interface(dev->intf) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 				 data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) EXPORT_SYMBOL_GPL(usbnet_write_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)  * The function can be called inside suspend/resume callback safely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)  * and should only be called by suspend/resume callback generally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 			  u16 value, u16 index, void *data, u16 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 				 data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)  * The function can be called inside suspend/resume callback safely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)  * and should only be called by suspend/resume callback generally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 			  u16 value, u16 index, const void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 			  u16 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 				  data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) static void usbnet_async_cmd_cb(struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	int status = urb->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		dev_dbg(&urb->dev->dev, "%s failed with %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 			__func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	kfree(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	usb_free_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)  * The caller must make sure that device can't be put into suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)  * state until the control URB completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 			   u16 value, u16 index, const void *data, u16 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	struct usb_ctrlrequest *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	void *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		   " value=0x%04x index=0x%04x size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		   cmd, reqtype, value, index, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	urb = usb_alloc_urb(0, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	if (!urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		buf = kmemdup(data, size, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 			netdev_err(dev->net, "Error allocating buffer"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 				   " in %s!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 			goto fail_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		goto fail_free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	req->bRequestType = reqtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	req->bRequest = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	req->wValue = cpu_to_le16(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	req->wIndex = cpu_to_le16(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	req->wLength = cpu_to_le16(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	usb_fill_control_urb(urb, dev->udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 			     usb_sndctrlpipe(dev->udev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 			     (void *)req, buf, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 			     usbnet_async_cmd_cb, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	urb->transfer_flags |= URB_FREE_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	err = usb_submit_urb(urb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		netdev_err(dev->net, "Error submitting the control"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 			   " message: status=%d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		goto fail_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) fail_free_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) fail_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	kfree(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	usb_free_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) static int __init usbnet_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	/* Compiler should optimize this out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	BUILD_BUG_ON(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	eth_random_addr(node_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) module_init(usbnet_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) static void __exit usbnet_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) module_exit(usbnet_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) MODULE_AUTHOR("David Brownell");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) MODULE_DESCRIPTION("USB network driver framework");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) MODULE_LICENSE("GPL");