Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Driver for the PLX NET2280 USB device controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Specs and errata are available from <http://www.plxtech.com>.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * PLX Technology Inc. (formerly NetChip Technology) supported the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * development of this driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * CODE STATUS HIGHLIGHTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  * This driver should work well with most "gadget" drivers, including
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * as well as Gadget Zero and Gadgetfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * DMA is enabled by default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * MSI is enabled by default.  The legacy IRQ is used if MSI couldn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  * be enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  * Note that almost all the errata workarounds here are only needed for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  * Copyright (C) 2003 David Brownell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  * Copyright (C) 2003-2005 PLX Technology, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  *	with 2282 chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  * Modified Ricardo Ribalda Qtechnology AS  to provide compatibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  *	with usb 338x chip. Based on PLX driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <linux/usb/ch9.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <linux/usb/gadget.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #include <linux/prefetch.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #include <asm/unaligned.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #define	DRIVER_DESC		"PLX NET228x/USB338x USB Peripheral Controller"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #define	DRIVER_VERSION		"2005 Sept 27/v3.0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #define	EP_DONTUSE		13	/* nonzero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) #define USE_RDK_LEDS		/* GPIO pins control three LEDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) static const char driver_name[] = "net2280";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) static const char driver_desc[] = DRIVER_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) static const char ep0name[] = "ep0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) #define EP_INFO(_name, _caps) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	{ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 		.name = _name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 		.caps = _caps, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	const struct usb_ep_caps caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) } ep_info_dft[] = { /* Default endpoint configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	EP_INFO(ep0name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	EP_INFO("ep-a",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	EP_INFO("ep-b",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	EP_INFO("ep-c",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	EP_INFO("ep-d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	EP_INFO("ep-e",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 	EP_INFO("ep-f",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	EP_INFO("ep-g",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	EP_INFO("ep-h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) }, ep_info_adv[] = { /* Endpoints for usb3380 advance mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	EP_INFO(ep0name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	EP_INFO("ep1in",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	EP_INFO("ep2out",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	EP_INFO("ep3in",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	EP_INFO("ep4out",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	EP_INFO("ep1out",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	EP_INFO("ep2in",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	EP_INFO("ep3out",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	EP_INFO("ep4in",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) #undef EP_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) /* mode 0 == ep-{a,b,c,d} 1K fifo each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127)  * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128)  * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) static ushort fifo_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) /* "modprobe net2280 fifo_mode=1" etc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) module_param(fifo_mode, ushort, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) /* enable_suspend -- When enabled, the driver will respond to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136)  * USB suspend requests by powering down the NET2280.  Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137)  * USB suspend requests will be ignored.  This is acceptable for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138)  * self-powered devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) static bool enable_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) /* "modprobe net2280 enable_suspend=1" etc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) module_param(enable_suspend, bool, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) #define	DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) static char *type_string(u8 bmAttributes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	case USB_ENDPOINT_XFER_BULK:	return "bulk";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	case USB_ENDPOINT_XFER_ISOC:	return "iso";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	case USB_ENDPOINT_XFER_INT:	return "intr";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	return "control";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) #include "net2280.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) #define valid_bit	cpu_to_le32(BIT(VALID_BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) #define dma_done_ie	cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) static void ep_clear_seqnum(struct net2280_ep *ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) static void stop_activity(struct net2280 *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 					struct usb_gadget_driver *driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) static void ep0_start(struct net2280 *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) static inline void enable_pciirqenb(struct net2280_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	u32 tmp = readl(&ep->dev->regs->pciirqenb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	if (ep->dev->quirks & PLX_LEGACY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 		tmp |= BIT(ep->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 		tmp |= BIT(ep_bit[ep->num]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	writel(tmp, &ep->dev->regs->pciirqenb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	struct net2280		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	u32			max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	u32 tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	u32 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	ep = container_of(_ep, struct net2280_ep, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 			desc->bDescriptorType != USB_DT_ENDPOINT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 		pr_err("%s: failed at line=%d\n", __func__, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	dev = ep->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 		ret = -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 		goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	/* erratum 0119 workaround ties up an endpoint number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 		ret = -EDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 		goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	if (dev->quirks & PLX_PCIE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 		if ((desc->bEndpointAddress & 0x0f) >= 0x0c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 			ret = -EDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 			goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 		ep->is_in = !!usb_endpoint_dir_in(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 		if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 			goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	/* sanity check ep-e/ep-f since their fifos are small */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	max = usb_endpoint_maxp(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 		ret = -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 		goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	spin_lock_irqsave(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	_ep->maxpacket = max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	ep->desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	/* ep_reset() has already been called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	ep->stopped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	ep->wedged = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	ep->out_overflow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	/* set speed-dependent max packet; may kick in high bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	set_max_speed(ep, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	/* set type, direction, address; reset fifo counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 		tmp = readl(&ep->cfg->ep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 		/* If USB ep number doesn't match hardware ep number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 		if ((tmp & 0xf) != usb_endpoint_num(desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 			spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 			goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 		if (ep->is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 			tmp &= ~USB3380_EP_CFG_MASK_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 			tmp &= ~USB3380_EP_CFG_MASK_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	type = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	if (type == USB_ENDPOINT_XFER_INT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 		/* erratum 0105 workaround prevents hs NYET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 		if (dev->chiprev == 0100 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 				dev->gadget.speed == USB_SPEED_HIGH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 				!(desc->bEndpointAddress & USB_DIR_IN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 			writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 				&ep->regs->ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	} else if (type == USB_ENDPOINT_XFER_BULK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 		/* catch some particularly blatant driver bugs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 		if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 		    (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 			spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 			ret = -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 			goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	ep->is_iso = (type == USB_ENDPOINT_XFER_ISOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	/* Enable this endpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	if (dev->quirks & PLX_LEGACY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 		tmp |= type << ENDPOINT_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 		tmp |= desc->bEndpointAddress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 		/* default full fifo lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 		tmp |= (4 << ENDPOINT_BYTE_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 		tmp |= BIT(ENDPOINT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 		ep->is_in = (tmp & USB_DIR_IN) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 		/* In Legacy mode, only OUT endpoints are used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 		if (dev->enhanced_mode && ep->is_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 			tmp |= type << IN_ENDPOINT_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 			tmp |= BIT(IN_ENDPOINT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 			tmp |= type << OUT_ENDPOINT_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 			tmp |= BIT(OUT_ENDPOINT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 			tmp |= (ep->is_in << ENDPOINT_DIRECTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 		tmp |= (4 << ENDPOINT_BYTE_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 		if (!dev->enhanced_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 			tmp |= usb_endpoint_num(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 		tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	/* Make sure all the registers are written before ep_rsp*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	/* for OUT transfers, block the rx fifo until a read is posted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	if (!ep->is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	else if (!(dev->quirks & PLX_2280)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 		/* Added for 2282, Don't use nak packets on an in endpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 		 * this was ignored on 2280
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 		writel(BIT(CLEAR_NAK_OUT_PACKETS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 			BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	if (dev->quirks & PLX_PCIE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 		ep_clear_seqnum(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	writel(tmp, &ep->cfg->ep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	/* enable irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	if (!ep->dma) {				/* pio, per-packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 		enable_pciirqenb(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 		tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 			BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 		if (dev->quirks & PLX_2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 			tmp |= readl(&ep->regs->ep_irqenb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 		writel(tmp, &ep->regs->ep_irqenb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	} else {				/* dma, per-request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 		tmp = BIT((8 + ep->num));	/* completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 		tmp |= readl(&dev->regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 		writel(tmp, &dev->regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 		/* for short OUT transfers, dma completions can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 		 * advance the queue; do it pio-style, by hand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 		 * NOTE erratum 0112 workaround #2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 			tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 			writel(tmp, &ep->regs->ep_irqenb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 			enable_pciirqenb(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	tmp = desc->bEndpointAddress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 		_ep->name, tmp & 0x0f, DIR_STRING(tmp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 		type_string(desc->bmAttributes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 		ep->dma ? "dma" : "pio", max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	/* pci writes may still be posted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) print_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	u32	result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	int	ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	ret = readl_poll_timeout_atomic(ptr, result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 					((result & mask) == done ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 					 result == U32_MAX),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 					1, usec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	if (result == U32_MAX)		/* device unplugged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) static const struct usb_ep_ops net2280_ep_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) static void ep_reset_228x(struct net2280_regs __iomem *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 			  struct net2280_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	u32		tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	ep->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	INIT_LIST_HEAD(&ep->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	ep->ep.ops = &net2280_ep_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	/* disable the dma, irqs, endpoint... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	if (ep->dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 		writel(0, &ep->dma->dmactl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 		writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 			BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 			BIT(DMA_ABORT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 			&ep->dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 		tmp = readl(&regs->pciirqenb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		tmp &= ~BIT(ep->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 		writel(tmp, &regs->pciirqenb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 		tmp = readl(&regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		tmp &= ~BIT((8 + ep->num));	/* completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 		writel(tmp, &regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	writel(0, &ep->regs->ep_irqenb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	/* init to our chosen defaults, notably so that we NAK OUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	 * packets until the driver queues a read (+note erratum 0112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 		tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		BIT(SET_NAK_OUT_PACKETS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 		BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 		BIT(CLEAR_INTERRUPT_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 		/* added for 2282 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 		tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 		BIT(CLEAR_NAK_OUT_PACKETS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 		BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 		BIT(CLEAR_INTERRUPT_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	if (ep->num != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 		tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 			BIT(CLEAR_ENDPOINT_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	writel(tmp, &ep->regs->ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	/* scrub most status bits, and flush any fifo state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	if (ep->dev->quirks & PLX_2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		tmp = BIT(FIFO_OVERFLOW) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 			BIT(FIFO_UNDERFLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	writel(tmp | BIT(TIMEOUT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		BIT(USB_STALL_SENT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 		BIT(USB_IN_NAK_SENT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		BIT(USB_IN_ACK_RCVD) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		BIT(USB_OUT_PING_NAK_SENT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		BIT(USB_OUT_ACK_SENT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		BIT(FIFO_FLUSH) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		BIT(DATA_IN_TOKEN_INTERRUPT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		&ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	/* fifo size is handled separately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) static void ep_reset_338x(struct net2280_regs __iomem *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 					struct net2280_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	u32 tmp, dmastat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	ep->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	INIT_LIST_HEAD(&ep->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	ep->ep.ops = &net2280_ep_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	/* disable the dma, irqs, endpoint... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	if (ep->dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		writel(0, &ep->dma->dmactl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 		       BIT(DMA_PAUSE_DONE_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		       BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		       BIT(DMA_TRANSACTION_DONE_INTERRUPT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		       /* | BIT(DMA_ABORT), */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		       &ep->dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		dmastat = readl(&ep->dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		if (dmastat == 0x5002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 			ep_warn(ep->dev, "The dmastat return = %x!!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 			       dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 			writel(0x5a, &ep->dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		tmp = readl(&regs->pciirqenb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		tmp &= ~BIT(ep_bit[ep->num]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		writel(tmp, &regs->pciirqenb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 		if (ep->num < 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 			tmp = readl(&regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 			tmp &= ~BIT((8 + ep->num));	/* completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 			writel(tmp, &regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	writel(0, &ep->regs->ep_irqenb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	       BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	       BIT(FIFO_OVERFLOW) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	       BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	       BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	       BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	       BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	tmp = readl(&ep->cfg->ep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	if (ep->is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		tmp &= ~USB3380_EP_CFG_MASK_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		tmp &= ~USB3380_EP_CFG_MASK_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	writel(tmp, &ep->cfg->ep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) static void nuke(struct net2280_ep *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) static int net2280_disable(struct usb_ep *_ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	ep = container_of(_ep, struct net2280_ep, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	if (!_ep || _ep->name == ep0name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		pr_err("%s: Invalid ep=%p\n", __func__, _ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	spin_lock_irqsave(&ep->dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	nuke(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	if (ep->dev->quirks & PLX_PCIE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		ep_reset_338x(ep->dev->regs, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		ep_reset_228x(ep->dev->regs, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	ep_vdbg(ep->dev, "disabled %s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 			ep->dma ? "dma" : "pio", _ep->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	/* synch memory views with the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	(void)readl(&ep->cfg->ep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	if (!ep->dma && ep->num >= 1 && ep->num <= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		ep->dma = &ep->dev->dma[ep->num - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	spin_unlock_irqrestore(&ep->dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) static struct usb_request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) *net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	struct net2280_request	*req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	if (!_ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		pr_err("%s: Invalid ep\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	ep = container_of(_ep, struct net2280_ep, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	req = kzalloc(sizeof(*req), gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	INIT_LIST_HEAD(&req->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	/* this dma descriptor may be swapped with the previous dummy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	if (ep->dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		struct net2280_dma	*td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		td = dma_pool_alloc(ep->dev->requests, gfp_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 				&req->td_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		if (!td) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 			kfree(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		td->dmacount = 0;	/* not VALID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		td->dmadesc = td->dmaaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 		req->td = td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	return &req->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	struct net2280_request	*req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	ep = container_of(_ep, struct net2280_ep, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	if (!_ep || !_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		dev_err(&ep->dev->pdev->dev, "%s: Invalid ep=%p or req=%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 							__func__, _ep, _req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	req = container_of(_req, struct net2280_request, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	WARN_ON(!list_empty(&req->queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	if (req->td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		dma_pool_free(ep->dev->requests, req->td, req->td_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	kfree(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) /* load a packet into the fifo we use for usb IN transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602)  * works for all endpoints.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604)  * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605)  * at a time, but this code is simpler because it knows it only writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606)  * one packet.  ep-a..ep-d should use dma instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	struct net2280_ep_regs	__iomem *regs = ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	u8			*buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	u32			tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	unsigned		count, total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	/* INVARIANT:  fifo is currently empty. (testable) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	if (req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		buf = req->buf + req->actual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		prefetch(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		total = req->length - req->actual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	/* write just one packet at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	count = ep->ep.maxpacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	if (count > total)	/* min() cannot be used on a bitfield */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		count = total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 			ep->ep.name, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 			(count != ep->ep.maxpacket) ? " (short)" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 			req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	while (count >= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		/* NOTE be careful if you try to align these. fifo lines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		 * should normally be full (4 bytes) and successive partial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		 * lines are ok only in certain cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		tmp = get_unaligned((u32 *)buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		cpu_to_le32s(&tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		writel(tmp, &regs->ep_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		buf += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		count -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	/* last fifo entry is "short" unless we wrote a full packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	 * also explicitly validate last word in (periodic) transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	 * when maxpacket is not a multiple of 4 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	if (count || total < ep->ep.maxpacket) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		tmp = count ? get_unaligned((u32 *)buf) : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		cpu_to_le32s(&tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		set_fifo_bytecount(ep, count & 0x03);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		writel(tmp, &regs->ep_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	/* pci writes may still be posted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) /* work around erratum 0106: PCI and USB race over the OUT fifo.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662)  * caller guarantees chiprev 0100, out endpoint is NAKing, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663)  * there's no real data in the fifo.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665)  * NOTE:  also used in cases where that erratum doesn't apply:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666)  * where the host wrote "too much" data to us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) static void out_flush(struct net2280_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	u32	__iomem *statp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	u32	tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	statp = &ep->regs->ep_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	tmp = readl(statp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	if (tmp & BIT(NAK_OUT_PACKETS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		ep_dbg(ep->dev, "%s %s %08x !NAK\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 			ep->ep.name, __func__, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		BIT(DATA_PACKET_RECEIVED_INTERRUPT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		statp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	writel(BIT(FIFO_FLUSH), statp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	/* Make sure that stap is written */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	tmp = readl(statp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 			/* high speed did bulk NYET; fifo isn't filling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			ep->dev->gadget.speed == USB_SPEED_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		unsigned	usec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		usec = 50;		/* 64 byte bulk/interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 				BIT(USB_OUT_PING_NAK_SENT), usec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) /* unload packet(s) from the fifo we use for usb OUT transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702)  * returns true iff the request completed, because of short packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703)  * or the request buffer having filled with full packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705)  * for ep-a..ep-d this will read multiple packets out when they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706)  * have been accepted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	struct net2280_ep_regs	__iomem *regs = ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	u8			*buf = req->req.buf + req->req.actual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	unsigned		count, tmp, is_short;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	unsigned		cleanup = 0, prevent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	/* erratum 0106 ... packets coming in during fifo reads might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	 * be incompletely rejected.  not all cases have workarounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	if (ep->dev->chiprev == 0x0100 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			ep->dev->gadget.speed == USB_SPEED_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		tmp = readl(&ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		if ((tmp & BIT(NAK_OUT_PACKETS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			cleanup = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		else if ((tmp & BIT(FIFO_FULL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 			start_out_naking(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 			prevent = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		/* else: hope we don't see the problem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	/* never overflow the rx buffer. the fifo reads packets until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	 * it sees a short one; we might not be ready for them all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	prefetchw(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	count = readl(&regs->ep_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (unlikely(count == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		tmp = readl(&ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		count = readl(&regs->ep_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		/* handled that data already? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	tmp = req->req.length - req->req.actual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	if (count > tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		/* as with DMA, data overflow gets flushed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		if ((tmp % ep->ep.maxpacket) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 			ep_err(ep->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 				"%s out fifo %d bytes, expected %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 				ep->ep.name, count, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			req->req.status = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 			cleanup = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 			/* NAK_OUT_PACKETS will be set, so flushing is safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 			 * the next read will start with the next packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		} /* else it's a ZLP, no worries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		count = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	req->req.actual += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			ep->ep.name, count, is_short ? " (short)" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			cleanup ? " flush" : "", prevent ? " nak" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			req, req->req.actual, req->req.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	while (count >= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		tmp = readl(&regs->ep_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		cpu_to_le32s(&tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		put_unaligned(tmp, (u32 *)buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		buf += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		count -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		tmp = readl(&regs->ep_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		/* LE conversion is implicit here: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			*buf++ = (u8) tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			tmp >>= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		} while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	if (cleanup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		out_flush(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	if (prevent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		(void) readl(&ep->regs->ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	return is_short || req->req.actual == req->req.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) /* fill out dma descriptor to match a given request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) static void fill_dma_desc(struct net2280_ep *ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 					struct net2280_request *req, int valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	struct net2280_dma	*td = req->td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	u32			dmacount = req->req.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	/* don't let DMA continue after a short OUT packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	 * so overruns can't affect the next transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 * in case of overruns on max-size packets, we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	 * stop the fifo from filling but we can flush it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if (ep->is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		dmacount |= BIT(DMA_DIRECTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 					!(ep->dev->quirks & PLX_2280))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		dmacount |= BIT(END_OF_CHAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	req->valid = valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	if (valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		dmacount |= BIT(VALID_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	/* td->dmadesc = previously set by caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	td->dmaaddr = cpu_to_le32 (req->req.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	td->dmacount = cpu_to_le32(dmacount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) static const u32 dmactl_default =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		BIT(DMA_CLEAR_COUNT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		/* erratum 0116 workaround part 1 (use POLLING) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		(POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		BIT(DMA_VALID_BIT_POLLING_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		BIT(DMA_VALID_BIT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		BIT(DMA_SCATTER_GATHER_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		/* erratum 0116 workaround part 2 (no AUTOSTART) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		BIT(DMA_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	spin_stop_dma(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct net2280_dma_regs	__iomem *dma = ep->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	if (!(ep->dev->quirks & PLX_2280))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		tmp |= BIT(END_OF_CHAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	writel(tmp, &dma->dmacount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	writel(readl(&dma->dmastat), &dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	writel(td_dma, &dma->dmadesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	if (ep->dev->quirks & PLX_PCIE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	writel(dmactl, &dma->dmactl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	/* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	(void) readl(&ep->dev->pci->pcimstctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	writel(BIT(DMA_START), &dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	u32			tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	struct net2280_dma_regs	__iomem *dma = ep->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	/* FIXME can't use DMA for ZLPs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	/* on this path we "know" there's no dma active (yet) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	writel(0, &ep->dma->dmactl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	/* previous OUT packet might have been short */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (!ep->is_in && (readl(&ep->regs->ep_stat) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 				BIT(NAK_OUT_PACKETS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			&ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		tmp = readl(&ep->regs->ep_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		if (tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			writel(readl(&dma->dmastat), &dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			/* transfer all/some fifo data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 			writel(req->req.dma, &dma->dmaaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 			tmp = min(tmp, req->req.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			/* dma irq, faking scatterlist status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			req->td->dmacount = cpu_to_le32(req->req.length - tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 					&dma->dmacount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			req->td->dmadesc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			req->valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			writel(BIT(DMA_ENABLE), &dma->dmactl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			writel(BIT(DMA_START), &dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		stop_out_naking(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	tmp = dmactl_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	/* force packet boundaries between dma requests, but prevent the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	 * controller from automagically writing a last "short" packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	 * (zero length) unless the driver explicitly said to do that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (ep->is_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		if (likely((req->req.length % ep->ep.maxpacket) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 							req->req.zero)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			tmp |= BIT(DMA_FIFO_VALIDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			ep->in_fifo_validate = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			ep->in_fifo_validate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	/* init req->td, pointing to the current dummy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	fill_dma_desc(ep, req, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	start_queue(ep, tmp, req->td_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	struct net2280_dma	*end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	dma_addr_t		tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	/* swap new dummy for old, link; fill and maybe activate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	end = ep->dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	ep->dummy = req->td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	req->td = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	tmp = ep->td_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	ep->td_dma = req->td_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	req->td_dma = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	end->dmadesc = cpu_to_le32 (ep->td_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	fill_dma_desc(ep, req, valid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) done(struct net2280_ep *ep, struct net2280_request *req, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	struct net2280		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	unsigned		stopped = ep->stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	list_del_init(&req->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	if (req->req.status == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		req->req.status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		status = req->req.status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	dev = ep->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (ep->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	if (status && status != -ESHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			ep->ep.name, &req->req, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			req->req.actual, req->req.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	/* don't modify queue heads during completion callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	ep->stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	spin_unlock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	usb_gadget_giveback_request(&ep->ep, &req->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	spin_lock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	ep->stopped = stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	struct net2280_request	*req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	struct net2280		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	/* we always require a cpu-view buffer, so that we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	 * always use pio (as fallback or whatever).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	ep = container_of(_ep, struct net2280_ep, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (!_ep || (!ep->desc && ep->num != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	req = container_of(_req, struct net2280_request, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	if (!_req || !_req->complete || !_req->buf ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 				!list_empty(&req->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		ret = -EDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	dev = ep->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		ret = -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	/* FIXME implement PIO fallback for ZLPs with DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	if (ep->dma && _req->length == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	/* set up dma mapping in case the caller didn't */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	if (ep->dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		ret = usb_gadget_map_request(&dev->gadget, _req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 				ep->is_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			_ep->name, _req, _req->length, _req->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	spin_lock_irqsave(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	_req->status = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	_req->actual = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	/* kickstart this i/o queue? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if  (list_empty(&ep->queue) && !ep->stopped &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		!((dev->quirks & PLX_PCIE) && ep->dma &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		  (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		/* use DMA if the endpoint supports it, else pio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		if (ep->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			start_dma(ep, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			/* maybe there's no control data, just status ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			if (ep->num == 0 && _req->length == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 				allow_status(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 				done(ep, req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 				ep_vdbg(dev, "%s status ack\n", ep->ep.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			/* PIO ... stuff the fifo, or unblock it.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			if (ep->is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 				write_fifo(ep, _req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 				u32	s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 				/* OUT FIFO might have packet(s) buffered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 				s = readl(&ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 				if ((s & BIT(FIFO_EMPTY)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 					/* note:  _req->short_not_ok is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 					 * ignored here since PIO _always_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 					 * stops queue advance here, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 					 * _req->status doesn't change for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 					 * short reads (only _req->actual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 					 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 					if (read_fifo(ep, req) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 							ep->num == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 						done(ep, req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 						allow_status(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 						/* don't queue it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 						req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 					} else if (read_fifo(ep, req) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 							ep->num != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 						done(ep, req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 						req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 					} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 						s = readl(&ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 				/* don't NAK, let the fifo fill */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 				if (req && (s & BIT(NAK_OUT_PACKETS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 					writel(BIT(CLEAR_NAK_OUT_PACKETS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 							&ep->regs->ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	} else if (ep->dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		int	valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		if (ep->is_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			int	expect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			/* preventing magic zlps is per-engine state, not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			 * per-transfer; irq logic must recover hiccups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			expect = likely(req->req.zero ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 				(req->req.length % ep->ep.maxpacket));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			if (expect != ep->in_fifo_validate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 				valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		queue_dma(ep, req, valid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	} /* else the irq handler advances the queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	ep->responded = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	if (req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		list_add_tail(&req->queue, &ep->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	/* pci writes may still be posted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) print_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) dma_done(struct net2280_ep *ep,	struct net2280_request *req, u32 dmacount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	done(ep, req, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static int scan_dma_completions(struct net2280_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	int num_completed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	/* only look at descriptors that were "naturally" retired,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	 * so fifo and list head state won't matter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	while (!list_empty(&ep->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		struct net2280_request	*req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		u32 req_dma_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		req = list_entry(ep->queue.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 				struct net2280_request, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		if (!req->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		req_dma_count = le32_to_cpup(&req->td->dmacount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		if ((req_dma_count & BIT(VALID_BIT)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		 * cases where DMA must be aborted; this code handles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		 * all non-abort DMA completions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		if (unlikely(req->td->dmadesc == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			/* paranoia */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			u32 const ep_dmacount = readl(&ep->dma->dmacount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			if (ep_dmacount & DMA_BYTE_COUNT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 			/* single transfer mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			dma_done(ep, req, req_dma_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			num_completed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		} else if (!ep->is_in &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			   (req->req.length % ep->ep.maxpacket) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			   !(ep->dev->quirks & PLX_PCIE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 			u32 const ep_stat = readl(&ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			/* AVOID TROUBLE HERE by not issuing short reads from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			 * your gadget driver.  That helps avoids errata 0121,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			 * 0122, and 0124; not all cases trigger the warning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 				ep_warn(ep->dev, "%s lost packet sync!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 						ep->ep.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 				req->req.status = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 				u32 const ep_avail = readl(&ep->regs->ep_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 				if (ep_avail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 					/* fifo gets flushed later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 					ep->out_overflow = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 					ep_dbg(ep->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 						"%s dma, discard %d len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 						ep->ep.name, ep_avail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 						req->req.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 					req->req.status = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		dma_done(ep, req, req_dma_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		num_completed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	return num_completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static void restart_dma(struct net2280_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	struct net2280_request	*req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (ep->stopped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	req = list_entry(ep->queue.next, struct net2280_request, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	start_dma(ep, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static void abort_dma(struct net2280_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	/* abort the current transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	if (likely(!list_empty(&ep->queue))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		/* FIXME work around errata 0121, 0122, 0124 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		writel(BIT(DMA_ABORT), &ep->dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		spin_stop_dma(ep->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		stop_dma(ep->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	scan_dma_completions(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /* dequeue ALL requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) static void nuke(struct net2280_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	struct net2280_request	*req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	/* called with spinlock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	ep->stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	if (ep->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		abort_dma(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	while (!list_empty(&ep->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		req = list_entry(ep->queue.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 				struct net2280_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 				queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		done(ep, req, -ESHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /* dequeue JUST ONE request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	struct net2280_request	*req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	u32			dmactl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	int			stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	ep = container_of(_ep, struct net2280_ep, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	if (!_ep || (!ep->desc && ep->num != 0) || !_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 						__func__, _ep, _req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	spin_lock_irqsave(&ep->dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	stopped = ep->stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	/* quiesce dma while we patch the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	dmactl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	ep->stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	if (ep->dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		dmactl = readl(&ep->dma->dmactl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		/* WARNING erratum 0127 may kick in ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		stop_dma(ep->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		scan_dma_completions(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	/* make sure it's still queued on this endpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	list_for_each_entry(req, &ep->queue, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		if (&req->req == _req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	if (&req->req != _req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		ep->stopped = stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		spin_unlock_irqrestore(&ep->dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	/* queue head may be partially complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	if (ep->queue.next == &req->queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		if (ep->dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 			ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 			_req->status = -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 			abort_dma(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 			if (likely(ep->queue.next == &req->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 				/* NOTE: misreports single-transfer mode*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 				req->td->dmacount = 0;	/* invalidate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 				dma_done(ep, req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 					readl(&ep->dma->dmacount),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 					-ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			done(ep, req, -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		done(ep, req, -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	ep->stopped = stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	if (ep->dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		/* turn off dma on inactive queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		if (list_empty(&ep->queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			stop_dma(ep->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		else if (!ep->stopped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 			/* resume current request, or start new one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			if (req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 				writel(dmactl, &ep->dma->dmactl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 				start_dma(ep, list_entry(ep->queue.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 					struct net2280_request, queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	spin_unlock_irqrestore(&ep->dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static int net2280_fifo_status(struct usb_ep *_ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	int			retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	ep = container_of(_ep, struct net2280_ep, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	if (!_ep || (!ep->desc && ep->num != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		retval = -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 						== USB_ENDPOINT_XFER_ISOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		goto print_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	spin_lock_irqsave(&ep->dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	if (!list_empty(&ep->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		retval = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		goto print_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	} else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		retval = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		goto print_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 				value ? "set" : "clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 				wedged ? "wedge" : "halt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		/* set/clear, then synch memory views with the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		if (value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 			if (ep->num == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 				ep->dev->protocol_stall = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 				set_halt(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			if (wedged)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 				ep->wedged = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			clear_halt(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 			if (ep->dev->quirks & PLX_PCIE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 				!list_empty(&ep->queue) && ep->td_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 					restart_dma(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			ep->wedged = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		(void) readl(&ep->regs->ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	spin_unlock_irqrestore(&ep->dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) print_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	spin_unlock_irqrestore(&ep->dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) print_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static int net2280_set_halt(struct usb_ep *_ep, int value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	return net2280_set_halt_and_wedge(_ep, value, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static int net2280_set_wedge(struct usb_ep *_ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	if (!_ep || _ep->name == ep0name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		pr_err("%s: Invalid ep=%p or ep0\n", __func__, _ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	return net2280_set_halt_and_wedge(_ep, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) static int net2280_fifo_status(struct usb_ep *_ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	u32			avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	ep = container_of(_ep, struct net2280_ep, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	if (!_ep || (!ep->desc && ep->num != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		dev_err(&ep->dev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			"%s: Invalid driver=%p or speed=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 			__func__, ep->dev->driver, ep->dev->gadget.speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		return -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	if (avail > ep->fifo_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (ep->is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		avail = ep->fifo_size - avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	return avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) static void net2280_fifo_flush(struct usb_ep *_ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	ep = container_of(_ep, struct net2280_ep, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	if (!_ep || (!ep->desc && ep->num != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		dev_err(&ep->dev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 			"%s: Invalid driver=%p or speed=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 			__func__, ep->dev->driver, ep->dev->gadget.speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	(void) readl(&ep->regs->ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) static const struct usb_ep_ops net2280_ep_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	.enable		= net2280_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	.disable	= net2280_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	.alloc_request	= net2280_alloc_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	.free_request	= net2280_free_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	.queue		= net2280_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	.dequeue	= net2280_dequeue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	.set_halt	= net2280_set_halt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	.set_wedge	= net2280_set_wedge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	.fifo_status	= net2280_fifo_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	.fifo_flush	= net2280_fifo_flush,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) static int net2280_get_frame(struct usb_gadget *_gadget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	struct net2280		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	u16			retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	if (!_gadget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	dev = container_of(_gadget, struct net2280, gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	spin_lock_irqsave(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static int net2280_wakeup(struct usb_gadget *_gadget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	struct net2280		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	u32			tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	if (!_gadget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	dev = container_of(_gadget, struct net2280, gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	spin_lock_irqsave(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	tmp = readl(&dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	/* pci writes may still be posted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	struct net2280		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	u32			tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	if (!_gadget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	dev = container_of(_gadget, struct net2280, gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	spin_lock_irqsave(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	tmp = readl(&dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	if (value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		tmp |= BIT(SELF_POWERED_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		_gadget->is_selfpowered = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		tmp &= ~BIT(SELF_POWERED_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		_gadget->is_selfpowered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	writel(tmp, &dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	struct net2280  *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	u32             tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	unsigned long   flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	if (!_gadget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	dev = container_of(_gadget, struct net2280, gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	spin_lock_irqsave(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	tmp = readl(&dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	dev->softconnect = (is_on != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	if (is_on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		ep0_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		stop_activity(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) static struct usb_ep *net2280_match_ep(struct usb_gadget *_gadget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		struct usb_endpoint_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		struct usb_ss_ep_comp_descriptor *ep_comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	char name[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	struct usb_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		/* ep-e, ep-f are PIO with only 64 byte fifos */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		ep = gadget_find_ep_by_name(_gadget, "ep-e");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 			return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		ep = gadget_find_ep_by_name(_gadget, "ep-f");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 			return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	/* USB3380: Only first four endpoints have DMA channels. Allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	 * slower interrupt endpoints from PIO hw endpoints, to allow bulk/isoc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	 * endpoints use DMA hw endpoints.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	    usb_endpoint_dir_in(desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		ep = gadget_find_ep_by_name(_gadget, "ep2in");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 			return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		ep = gadget_find_ep_by_name(_gadget, "ep4in");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 			return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	} else if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		   !usb_endpoint_dir_in(desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		ep = gadget_find_ep_by_name(_gadget, "ep1out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 			return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		ep = gadget_find_ep_by_name(_gadget, "ep3out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 			return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	} else if (usb_endpoint_type(desc) != USB_ENDPOINT_XFER_BULK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		   usb_endpoint_dir_in(desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		ep = gadget_find_ep_by_name(_gadget, "ep1in");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 			return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		ep = gadget_find_ep_by_name(_gadget, "ep3in");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 			return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	} else if (usb_endpoint_type(desc) != USB_ENDPOINT_XFER_BULK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		   !usb_endpoint_dir_in(desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		ep = gadget_find_ep_by_name(_gadget, "ep2out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 			return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		ep = gadget_find_ep_by_name(_gadget, "ep4out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 			return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	/* USB3380: use same address for usb and hardware endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	snprintf(name, sizeof(name), "ep%d%s", usb_endpoint_num(desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 			usb_endpoint_dir_in(desc) ? "in" : "out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	ep = gadget_find_ep_by_name(_gadget, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) static int net2280_start(struct usb_gadget *_gadget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		struct usb_gadget_driver *driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static int net2280_stop(struct usb_gadget *_gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) static const struct usb_gadget_ops net2280_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	.get_frame	= net2280_get_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	.wakeup		= net2280_wakeup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	.set_selfpowered = net2280_set_selfpowered,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	.pullup		= net2280_pullup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	.udc_start	= net2280_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	.udc_stop	= net2280_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	.match_ep	= net2280_match_ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) #ifdef	CONFIG_USB_GADGET_DEBUG_FILES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) /* FIXME move these into procfs, and use seq_file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)  * Sysfs _still_ doesn't behave for arbitrarily sized files,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  * and also doesn't help products using this with 2.4 kernels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) /* "function" sysfs attribute */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			     char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	struct net2280	*dev = dev_get_drvdata(_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	if (!dev->driver || !dev->driver->function ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 			strlen(dev->driver->function) > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static DEVICE_ATTR_RO(function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) static ssize_t registers_show(struct device *_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			      struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	struct net2280		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	char			*next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	unsigned		size, t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	u32			t1, t2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	const char		*s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	dev = dev_get_drvdata(_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	next = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	spin_lock_irqsave(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	if (dev->driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		s = dev->driver->driver.name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		s = "(none)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	/* Main Control Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	t = scnprintf(next, size, "%s version " DRIVER_VERSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			", chiprev %04x\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			"devinit %03x fifoctl %08x gadget '%s'\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			"pci irqenb0 %02x irqenb1 %08x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			"irqstat0 %04x irqstat1 %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 			driver_name, dev->chiprev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			readl(&dev->regs->devinit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			readl(&dev->regs->fifoctl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 			s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			readl(&dev->regs->pciirqenb0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			readl(&dev->regs->pciirqenb1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 			readl(&dev->regs->irqstat0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			readl(&dev->regs->irqstat1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	/* USB Control Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	t1 = readl(&dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	t2 = readl(&dev->usb->usbstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	if (t1 & BIT(VBUS_PIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		if (t2 & BIT(HIGH_SPEED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			s = "high speed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 			s = "powered";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			s = "full speed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		/* full speed bit (6) not working?? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 			s = "not attached";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	t = scnprintf(next, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			"stdrsp %08x usbctl %08x usbstat %08x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 				"addr 0x%02x (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 			readl(&dev->usb->stdrsp), t1, t2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			readl(&dev->usb->ouraddr), s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	/* PCI Master Control Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	/* DMA Control Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	/* Configurable EP Control Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	for (i = 0; i < dev->n_ep; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		ep = &dev->ep[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		if (i && !ep->desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		t1 = readl(&ep->cfg->ep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		t2 = readl(&ep->regs->ep_rsp) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		t = scnprintf(next, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 					"irqenb %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 				ep->ep.name, t1, t2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 				(t2 & BIT(CLEAR_NAK_OUT_PACKETS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 					? "NAK " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 				(t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 					? "hide " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 				(t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 					? "CRC " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 				(t2 & BIT(CLEAR_INTERRUPT_MODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 					? "interrupt " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 				(t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 					? "status " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 				(t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 					? "NAKmode " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 				(t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 					? "DATA1 " : "DATA0 ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 				(t2 & BIT(CLEAR_ENDPOINT_HALT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 					? "HALT " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 				readl(&ep->regs->ep_irqenb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		t = scnprintf(next, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 				"\tstat %08x avail %04x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 				"(ep%d%s-%s)%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 				readl(&ep->regs->ep_stat),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 				readl(&ep->regs->ep_avail),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 				t1 & 0x0f, DIR_STRING(t1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 				type_string(t1 >> 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 				ep->stopped ? "*" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		if (!ep->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		t = scnprintf(next, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 				"  dma\tctl %08x stat %08x count %08x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 				"\taddr %08x desc %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 				readl(&ep->dma->dmactl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 				readl(&ep->dma->dmastat),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 				readl(&ep->dma->dmacount),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 				readl(&ep->dma->dmaaddr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 				readl(&ep->dma->dmadesc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	/* Indexed Registers (none yet) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	/* Statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	t = scnprintf(next, size, "\nirqs:  ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	for (i = 0; i < dev->n_ep; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		ep = &dev->ep[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		if (i && !ep->irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	t = scnprintf(next, size, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	return PAGE_SIZE - size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) static DEVICE_ATTR_RO(registers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 			   char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	struct net2280		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	char			*next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	unsigned		size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	dev = dev_get_drvdata(_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	next = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	spin_lock_irqsave(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	for (i = 0; i < dev->n_ep; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		struct net2280_ep		*ep = &dev->ep[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		struct net2280_request		*req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		int				t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		if (i != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 			const struct usb_endpoint_descriptor	*d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 			d = ep->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 			t = d->bEndpointAddress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 			t = scnprintf(next, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 				(t & USB_DIR_IN) ? "in" : "out",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 				type_string(d->bmAttributes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 				usb_endpoint_maxp(d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 				ep->dma ? "dma" : "pio", ep->fifo_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 				);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		} else /* ep0 should only have one transfer queued */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 			t = scnprintf(next, size, "ep0 max 64 pio %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 					ep->is_in ? "in" : "out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		if (t <= 0 || t > size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		if (list_empty(&ep->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 			t = scnprintf(next, size, "\t(nothing queued)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			if (t <= 0 || t > size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		list_for_each_entry(req, &ep->queue, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 			if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 				t = scnprintf(next, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 					"\treq %p len %d/%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 					"buf %p (dmacount %08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 					&req->req, req->req.actual,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 					req->req.length, req->req.buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 					readl(&ep->dma->dmacount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 				t = scnprintf(next, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 					"\treq %p len %d/%d buf %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 					&req->req, req->req.actual,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 					req->req.length, req->req.buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			if (t <= 0 || t > size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 			size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 			next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 			if (ep->dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 				struct net2280_dma	*td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 				td = req->td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 				t = scnprintf(next, size, "\t    td %08x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 					" count %08x buf %08x desc %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 					(u32) req->td_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 					le32_to_cpu(td->dmacount),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 					le32_to_cpu(td->dmaaddr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 					le32_to_cpu(td->dmadesc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 				if (t <= 0 || t > size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 					goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 				size -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 				next += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	return PAGE_SIZE - size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) static DEVICE_ATTR_RO(queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) #define device_create_file(a, b)	(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) #define device_remove_file(a, b)	do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) /* another driver-specific mode might be a request type doing dma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)  * to/from another device fifo instead of to/from memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) static void set_fifo_mode(struct net2280 *dev, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	/* keeping high bits preserves BAR2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	INIT_LIST_HEAD(&dev->gadget.ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		dev->ep[1].fifo_size = 2048;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 		dev->ep[2].fifo_size = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) static void defect7374_disable_data_eps(struct net2280 *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	 * For Defect 7374, disable data EPs (and more):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	 *  - This phase undoes the earlier phase of the Defect 7374 workaround,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	 *    returing ep regs back to normal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	struct net2280_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	unsigned char ep_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	u32 tmp_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	for (i = 1; i < 5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		ep = &dev->ep[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		writel(i, &ep->cfg->ep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	/* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		writel(0, &dev->dep[i].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		/* Select an endpoint for subsequent operations: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 					ep_sel == 18 || ep_sel == 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		/* Change settings on some selected endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		tmp_reg |= BIT(EP_INITIALIZED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) static void defect7374_enable_data_eps_zero(struct net2280 *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	u32 tmp = 0, tmp_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	u32 scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	unsigned char ep_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	scratch = get_idx_reg(dev->regs, SCRATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		== DEFECT7374_FSM_SS_CONTROL_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	ep_warn(dev, "Operate Defect 7374 workaround soft this time");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	ep_warn(dev, "It will operate on cold-reboot and SS connect");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	/*GPEPs:*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 			(2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 			((dev->enhanced_mode) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 			 BIT(OUT_ENDPOINT_ENABLE) | BIT(IN_ENDPOINT_ENABLE) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 			 BIT(ENDPOINT_ENABLE)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	for (i = 1; i < 5; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		writel(tmp, &dev->ep[i].cfg->ep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	/* CSRIN, PCIIN, STATIN, RCIN*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	writel(tmp, &dev->dep[1].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	writel(tmp, &dev->dep[3].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	writel(tmp, &dev->dep[4].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	writel(tmp, &dev->dep[5].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	/*Implemented for development and debug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	 * Can be refined/tuned later.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		/* Select an endpoint for subsequent operations: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 		writel(((tmp_reg & ~0x1f) | ep_sel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 				&dev->plregs->pl_ep_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		if (ep_sel == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 			tmp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 				(readl(&dev->plregs->pl_ep_ctrl) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 				 BIT(CLEAR_ACK_ERROR_CODE) | 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 			writel(tmp, &dev->plregs->pl_ep_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 				ep_sel == 18  || ep_sel == 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 				BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		writel(tmp, &dev->plregs->pl_ep_cfg_4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		tmp = readl(&dev->plregs->pl_ep_ctrl) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 			~BIT(EP_INITIALIZED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		writel(tmp, &dev->plregs->pl_ep_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	/* Set FSM to focus on the first Control Read:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	 * - Tip: Connection speed is known upon the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	 * setup request.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	set_idx_reg(dev->regs, SCRATCH, scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) /* keeping it simple:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)  * - one bus driver, initted first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)  * - one function driver, initted second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)  * most of the work to support multiple net2280 controllers would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)  * be to associate this gadget driver (yes?) with all of them, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)  * perhaps to bind specific drivers to specific devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) static void usb_reset_228x(struct net2280 *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	u32	tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	dev->gadget.speed = USB_SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	(void) readl(&dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	net2280_led_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	/* disable automatic responses, and irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	writel(0, &dev->usb->stdrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	writel(0, &dev->regs->pciirqenb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	writel(0, &dev->regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	/* clear old dma and irq state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	for (tmp = 0; tmp < 4; tmp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		struct net2280_ep       *ep = &dev->ep[tmp + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		if (ep->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 			abort_dma(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	writel(~0, &dev->regs->irqstat0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	/* reset, and enable pci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	tmp = readl(&dev->regs->devinit) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		BIT(PCI_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		BIT(FIFO_SOFT_RESET) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 		BIT(USB_SOFT_RESET) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		BIT(M8051_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	writel(tmp, &dev->regs->devinit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	/* standard fifo and endpoint allocations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) static void usb_reset_338x(struct net2280 *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	dev->gadget.speed = USB_SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	(void)readl(&dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	net2280_led_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	if (dev->bug7734_patched) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		/* disable automatic responses, and irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		writel(0, &dev->usb->stdrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		writel(0, &dev->regs->pciirqenb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		writel(0, &dev->regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	/* clear old dma and irq state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	for (tmp = 0; tmp < 4; tmp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		struct net2280_ep *ep = &dev->ep[tmp + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		struct net2280_dma_regs __iomem *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		if (ep->dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 			abort_dma(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 			dma = &dev->dma[tmp];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 			writel(BIT(DMA_ABORT), &dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 			writel(0, &dma->dmactl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	if (dev->bug7734_patched) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		/* reset, and enable pci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		tmp = readl(&dev->regs->devinit) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		    BIT(PCI_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		    BIT(FIFO_SOFT_RESET) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		    BIT(USB_SOFT_RESET) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		    BIT(M8051_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		writel(tmp, &dev->regs->devinit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	/* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	INIT_LIST_HEAD(&dev->gadget.ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	for (tmp = 1; tmp < dev->n_ep; tmp++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 		list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) static void usb_reset(struct net2280 *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	if (dev->quirks & PLX_LEGACY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		return usb_reset_228x(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	return usb_reset_338x(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) static void usb_reinit_228x(struct net2280 *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	u32	tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	/* basic endpoint init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	for (tmp = 0; tmp < 7; tmp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		struct net2280_ep	*ep = &dev->ep[tmp];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		ep->ep.name = ep_info_dft[tmp].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		ep->ep.caps = ep_info_dft[tmp].caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 		ep->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		ep->num = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		if (tmp > 0 && tmp <= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 			ep->fifo_size = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 			ep->dma = &dev->dma[tmp - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 			ep->fifo_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		ep->regs = &dev->epregs[tmp];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		ep->cfg = &dev->epregs[tmp];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		ep_reset_228x(dev->regs, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	dev->gadget.ep0 = &dev->ep[0].ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	dev->ep[0].stopped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	/* we want to prevent lowlevel/insecure access from the USB host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	 * but erratum 0119 means this enable bit is ignored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	for (tmp = 0; tmp < 5; tmp++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) static void usb_reinit_338x(struct net2280 *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	u32 tmp, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 						0x00, 0xC0, 0x00, 0xC0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	/* basic endpoint init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	for (i = 0; i < dev->n_ep; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		struct net2280_ep *ep = &dev->ep[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		ep->ep.name = dev->enhanced_mode ? ep_info_adv[i].name :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 						   ep_info_dft[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		ep->ep.caps = dev->enhanced_mode ? ep_info_adv[i].caps :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 						   ep_info_dft[i].caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		ep->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		ep->num = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		if (i > 0 && i <= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 			ep->dma = &dev->dma[i - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		if (dev->enhanced_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 			ep->cfg = &dev->epregs[ne[i]];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 			 * Set USB endpoint number, hardware allows same number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 			 * in both directions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			 if (i > 0 && i < 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 				writel(ne[i], &ep->cfg->ep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			ep->regs = (struct net2280_ep_regs __iomem *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 				(((void __iomem *)&dev->epregs[ne[i]]) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 				ep_reg_addr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 			ep->cfg = &dev->epregs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 			ep->regs = &dev->epregs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		ep->fifo_size = (i != 0) ? 2048 : 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		ep_reset_338x(dev->regs, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	dev->gadget.ep0 = &dev->ep[0].ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	dev->ep[0].stopped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	/* Link layer set up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	if (dev->bug7734_patched) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		tmp = readl(&dev->usb_ext->usbctl2) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		    ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		writel(tmp, &dev->usb_ext->usbctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	/* Hardware Defect and Workaround */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	val = readl(&dev->llregs->ll_lfps_5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	val &= ~(0xf << TIMER_LFPS_6US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	val |= 0x5 << TIMER_LFPS_6US;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	writel(val, &dev->llregs->ll_lfps_5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	val = readl(&dev->llregs->ll_lfps_6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	val &= ~(0xffff << TIMER_LFPS_80US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	val |= 0x0100 << TIMER_LFPS_80US;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	writel(val, &dev->llregs->ll_lfps_6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	 * Hot Reset Exit Handshake may Fail in Specific Case using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	 * Default Register Settings. Workaround for Enumeration test.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	val = readl(&dev->llregs->ll_tsn_counters_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	val &= ~(0x1f << HOT_TX_NORESET_TS2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	val |= 0x10 << HOT_TX_NORESET_TS2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	writel(val, &dev->llregs->ll_tsn_counters_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	val = readl(&dev->llregs->ll_tsn_counters_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	val &= ~(0x1f << HOT_RX_RESET_TS2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	val |= 0x3 << HOT_RX_RESET_TS2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	writel(val, &dev->llregs->ll_tsn_counters_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	 * AB errata. Errata 11. Workaround for Default Duration of LFPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	 * Handshake Signaling for Device-Initiated U1 Exit is too short.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	 * Without this, various enumeration failures observed with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	 * modern superspeed hosts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	val = readl(&dev->llregs->ll_lfps_timers_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	writel((val & 0xffff0000) | LFPS_TIMERS_2_WORKAROUND_VALUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	       &dev->llregs->ll_lfps_timers_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	 * Set Recovery Idle to Recover bit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	 * - On SS connections, setting Recovery Idle to Recover Fmw improves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	 *   link robustness with various hosts and hubs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	 * - It is safe to set for all connection speeds; all chip revisions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	 * - R-M-W to leave other bits undisturbed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	 * - Reference PLX TT-7372
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	val = readl(&dev->llregs->ll_tsn_chicken_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	writel(val, &dev->llregs->ll_tsn_chicken_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	/* disable dedicated endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	writel(0x0D, &dev->dep[0].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	writel(0x0D, &dev->dep[1].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	writel(0x0E, &dev->dep[2].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	writel(0x0E, &dev->dep[3].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	writel(0x0F, &dev->dep[4].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	writel(0x0C, &dev->dep[5].dep_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) static void usb_reinit(struct net2280 *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	if (dev->quirks & PLX_LEGACY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		return usb_reinit_228x(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	return usb_reinit_338x(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) static void ep0_start_228x(struct net2280 *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 		BIT(CLEAR_NAK_OUT_PACKETS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		&dev->epregs[0].ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	 * hardware optionally handles a bunch of standard requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	 * that the API hides from drivers anyway.  have it do so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	 * endpoint status/features are handled in software, to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	 * help pass tests for some dubious behavior.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	writel(BIT(SET_TEST_MODE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		BIT(SET_ADDRESS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		BIT(GET_DEVICE_STATUS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		BIT(GET_INTERFACE_STATUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		&dev->usb->stdrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		BIT(SELF_POWERED_USB_DEVICE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		BIT(REMOTE_WAKEUP_SUPPORT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		(dev->softconnect << USB_DETECT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		BIT(SELF_POWERED_STATUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 		&dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	/* enable irqs so we can see ep0 and general operation  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		BIT(ENDPOINT_0_INTERRUPT_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		&dev->regs->pciirqenb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	writel(BIT(PCI_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		BIT(VBUS_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		&dev->regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	/* don't leave any writes posted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	(void) readl(&dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) static void ep0_start_338x(struct net2280 *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	if (dev->bug7734_patched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		       BIT(SET_EP_HIDE_STATUS_PHASE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		       &dev->epregs[0].ep_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	 * hardware optionally handles a bunch of standard requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	 * that the API hides from drivers anyway.  have it do so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	 * endpoint status/features are handled in software, to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	 * help pass tests for some dubious behavior.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	writel(BIT(SET_ISOCHRONOUS_DELAY) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	       BIT(SET_SEL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	       BIT(SET_TEST_MODE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	       BIT(SET_ADDRESS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	       BIT(GET_INTERFACE_STATUS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	       BIT(GET_DEVICE_STATUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 		&dev->usb->stdrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	dev->wakeup_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	       (dev->softconnect << USB_DETECT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	       BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	       &dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	/* enable irqs so we can see ep0 and general operation  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	       BIT(ENDPOINT_0_INTERRUPT_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	       &dev->regs->pciirqenb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	writel(BIT(PCI_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	       BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	       BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	       BIT(VBUS_INTERRUPT_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	       &dev->regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	/* don't leave any writes posted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	(void)readl(&dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) static void ep0_start(struct net2280 *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	if (dev->quirks & PLX_LEGACY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		return ep0_start_228x(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	return ep0_start_338x(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) /* when a driver is successfully registered, it will receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)  * control requests including set_configuration(), which enables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)  * non-control requests.  then usb traffic follows until a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)  * disconnect is reported.  then a host may connect again, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)  * the driver might get unbound.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) static int net2280_start(struct usb_gadget *_gadget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		struct usb_gadget_driver *driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	struct net2280		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	int			retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	unsigned		i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	/* insist on high speed support from the driver, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	 * "must not be used in normal operation"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 			!driver->setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	dev = container_of(_gadget, struct net2280, gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	for (i = 0; i < dev->n_ep; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		dev->ep[i].irqs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	/* hook up the driver ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	driver->driver.bus = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	dev->driver = driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		goto err_unbind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		goto err_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	/* enable host detection and ep0; and we're ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	 * for set_configuration as well as eventual disconnect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	net2280_led_active(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		defect7374_enable_data_eps_zero(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	ep0_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	/* pci writes may still be posted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) err_func:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	device_remove_file(&dev->pdev->dev, &dev_attr_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) err_unbind:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	dev->driver = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	/* don't disconnect if it's not connected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 		driver = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	/* stop hardware; prevent new request submissions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	 * and kill any outstanding requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	usb_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	for (i = 0; i < dev->n_ep; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 		nuke(&dev->ep[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	/* report disconnect; the driver is already quiesced */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	if (driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 		spin_unlock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 		driver->disconnect(&dev->gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 		spin_lock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	usb_reinit(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) static int net2280_stop(struct usb_gadget *_gadget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	struct net2280	*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	unsigned long	flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	dev = container_of(_gadget, struct net2280, gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	spin_lock_irqsave(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	stop_activity(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	net2280_led_active(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	device_remove_file(&dev->pdev->dev, &dev_attr_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	device_remove_file(&dev->pdev->dev, &dev_attr_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	dev->driver = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)  * also works for dma-capable endpoints, in pio mode or just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)  * to manually advance the queue after short OUT transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) static void handle_ep_small(struct net2280_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	struct net2280_request	*req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	u32			t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	/* 0 error, 1 mid-data, 2 done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	int			mode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	if (!list_empty(&ep->queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 		req = list_entry(ep->queue.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 			struct net2280_request, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	/* ack all, and handle what we care about */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	t = readl(&ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	ep->irqs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 			ep->ep.name, t, req ? &req->req : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	if (!ep->is_in || (ep->dev->quirks & PLX_2280))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 		/* Added for 2282 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		writel(t, &ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	/* for ep0, monitor token irqs to catch data stage length errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	 * and to synchronize on status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	 * also, to defer reporting of protocol stalls ... here's where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	 * data or status first appears, handling stalls here should never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	 * cause trouble on the host side..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	 * control requests could be slightly faster without token synch for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	 * status, but status can jam up that way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	if (unlikely(ep->num == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		if (ep->is_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 			/* status; stop NAKing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 			if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 				if (ep->dev->protocol_stall) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 					ep->stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 					set_halt(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 				if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 					allow_status(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 				mode = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 			/* reply to extra IN data tokens with a zlp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 			} else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 				if (ep->dev->protocol_stall) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 					ep->stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 					set_halt(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 					mode = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 				} else if (ep->responded &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 						!req && !ep->stopped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 					write_fifo(ep, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 			/* status; stop NAKing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 			if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 				if (ep->dev->protocol_stall) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 					ep->stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 					set_halt(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 				mode = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 			/* an extra OUT token is an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 			} else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 					req &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 					req->req.actual == req->req.length) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 					(ep->responded && !req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 				ep->dev->protocol_stall = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 				set_halt(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 				ep->stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 				if (req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 					done(ep, req, -EOVERFLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 				req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	if (unlikely(!req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	/* manual DMA queue advance after short OUT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	if (likely(ep->dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 		if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 			struct net2280_request *stuck_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 			int	stopped = ep->stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 			int	num_completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 			int	stuck = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 			u32	count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 			/* TRANSFERRED works around OUT_DONE erratum 0112.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 			 * we expect (N <= maxpacket) bytes; host wrote M.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 			 * iff (M < N) we won't ever see a DMA interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 			ep->stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 			for (count = 0; ; t = readl(&ep->regs->ep_stat)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 				/* any preceding dma transfers must finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 				 * dma handles (M >= N), may empty the queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 				num_completed = scan_dma_completions(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 				if (unlikely(list_empty(&ep->queue) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 						ep->out_overflow)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 					req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 				req = list_entry(ep->queue.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 					struct net2280_request, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 				/* here either (M < N), a "real" short rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 				 * or (M == N) and the queue didn't empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 				if (likely(t & BIT(FIFO_EMPTY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 					count = readl(&ep->dma->dmacount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 					count &= DMA_BYTE_COUNT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 					if (readl(&ep->dma->dmadesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 							!= req->td_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 						req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 				/* Escape loop if no dma transfers completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 				 * after few retries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 				if (num_completed == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 					if (stuck_req == req &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 					    readl(&ep->dma->dmadesc) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 						  req->td_dma && stuck++ > 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 						count = readl(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 							&ep->dma->dmacount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 						count &= DMA_BYTE_COUNT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 						req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 						ep_dbg(ep->dev, "%s escape stuck %d, count %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 							ep->ep.name, stuck,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 							count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 					} else if (stuck_req != req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 						stuck_req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 						stuck = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 					stuck_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 					stuck = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 				udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 			/* stop DMA, leave ep NAKing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 			writel(BIT(DMA_ABORT), &ep->dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 			spin_stop_dma(ep->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 			if (likely(req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 				req->td->dmacount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 				t = readl(&ep->regs->ep_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 				dma_done(ep, req, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 					(ep->out_overflow || t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 						? -EOVERFLOW : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 			/* also flush to prevent erratum 0106 trouble */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 			if (unlikely(ep->out_overflow ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 					(ep->dev->chiprev == 0x0100 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 					ep->dev->gadget.speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 					== USB_SPEED_FULL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 				out_flush(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 				ep->out_overflow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 			/* (re)start dma if needed, stop NAKing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 			ep->stopped = stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 			if (!list_empty(&ep->queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 				restart_dma(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 			ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 					ep->ep.name, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	/* data packet(s) received (in the fifo, OUT) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	} else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 		if (read_fifo(ep, req) && ep->num != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 			mode = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	/* data packet(s) transmitted (IN) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	} else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		unsigned	len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		len = req->req.length - req->req.actual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		if (len > ep->ep.maxpacket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 			len = ep->ep.maxpacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		req->req.actual += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		/* if we wrote it all, we're usually done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 		/* send zlps until the status stage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 		if ((req->req.actual == req->req.length) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 			(!req->req.zero || len != ep->ep.maxpacket) && ep->num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 				mode = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	/* there was nothing to do ...  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	} else if (mode == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	/* done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	if (mode == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 		/* stream endpoints often resubmit/unlink in completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		done(ep, req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 		/* maybe advance queue to next request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 		if (ep->num == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 			/* NOTE:  net2280 could let gadget driver start the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 			 * status stage later. since not all controllers let
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 			 * them control that, the api doesn't (yet) allow it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 			if (!ep->stopped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 				allow_status(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 			req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 			if (!list_empty(&ep->queue) && !ep->stopped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 				req = list_entry(ep->queue.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 					struct net2280_request, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 				req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 			if (req && !ep->is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 				stop_out_naking(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	/* is there a buffer for the next packet?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	 * for best streaming performance, make sure there is one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	if (req && !ep->stopped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		/* load IN fifo with next packet (may be zlp) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 		if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 			write_fifo(ep, &req->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 		return &dev->ep[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 		u8	bEndpointAddress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		if (!ep->desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 		bEndpointAddress = ep->desc->bEndpointAddress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 			return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	u32 scratch, fsmvalue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	u32 ack_wait_timeout, state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	/* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	scratch = get_idx_reg(dev->regs, SCRATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 				(r.bRequestType & USB_DIR_IN)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	/* This is the first Control Read for this connection: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 		 * Connection is NOT SS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 		 * - Connection must be FS or HS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 		 * - This FSM state should allow workaround software to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 		 * run after the next USB connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 		scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 		dev->bug7734_patched = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 		goto restore_data_eps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	/* Connection is SS: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	for (ack_wait_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 			ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 			ack_wait_timeout++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 		state =	readl(&dev->plregs->pl_ep_status_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 			& (0xff << STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 		if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 			(state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 			scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 			dev->bug7734_patched = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		 * We have not yet received host's Data Phase ACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		 * - Wait and try again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 		udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 		continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 		ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 		"to detect SS host's data phase ACK.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 		ep_err(dev, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		"got 0x%2.2x.\n", state >> STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		ep_warn(dev, "INFO: Defect 7374 workaround waited about\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 		"%duSec for Control Read Data Phase ACK\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 			DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) restore_data_eps:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	 * Restore data EPs to their pre-workaround settings (disabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	 * initialized, and other details).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	defect7374_disable_data_eps(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	set_idx_reg(dev->regs, SCRATCH, scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) static void ep_clear_seqnum(struct net2280_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	struct net2280 *dev = ep->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	val |= ep_pl[ep->num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	writel(val, &dev->plregs->pl_ep_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	val |= BIT(SEQUENCE_NUMBER_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	writel(val, &dev->plregs->pl_ep_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) static void handle_stat0_irqs_superspeed(struct net2280 *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 		struct net2280_ep *ep, struct usb_ctrlrequest r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	struct net2280_ep *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	int tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) #define	w_value		le16_to_cpu(r.wValue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) #define	w_index		le16_to_cpu(r.wIndex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) #define	w_length	le16_to_cpu(r.wLength)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	switch (r.bRequest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	case USB_REQ_SET_CONFIGURATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 		dev->addressed_state = !w_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 		goto usb3_delegate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	case USB_REQ_GET_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 		switch (r.bRequestType) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 			status = dev->wakeup_enable ? 0x02 : 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 			if (dev->gadget.is_selfpowered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 				status |= BIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 			status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 							dev->ltm_enable << 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 			writel(0, &dev->epregs[0].ep_irqenb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 			set_fifo_bytecount(ep, sizeof(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 			writel((__force u32) status, &dev->epregs[0].ep_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 			allow_status_338x(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 			e = get_ep_by_addr(dev, w_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 			if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 				goto do_stall3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 			status = readl(&e->regs->ep_rsp) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 						BIT(CLEAR_ENDPOINT_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 			writel(0, &dev->epregs[0].ep_irqenb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 			set_fifo_bytecount(ep, sizeof(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 			writel((__force u32) status, &dev->epregs[0].ep_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 			allow_status_338x(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 			goto usb3_delegate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	case USB_REQ_CLEAR_FEATURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		switch (r.bRequestType) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 			if (!dev->addressed_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 				switch (w_value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 				case USB_DEVICE_U1_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 					dev->u1_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 					writel(readl(&dev->usb_ext->usbctl2) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 						~BIT(U1_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 						&dev->usb_ext->usbctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 					allow_status_338x(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 					goto next_endpoints3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 				case USB_DEVICE_U2_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 					dev->u2_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 					writel(readl(&dev->usb_ext->usbctl2) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 						~BIT(U2_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 						&dev->usb_ext->usbctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 					allow_status_338x(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 					goto next_endpoints3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 				case USB_DEVICE_LTM_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 					dev->ltm_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 					writel(readl(&dev->usb_ext->usbctl2) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 						~BIT(LTM_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 						&dev->usb_ext->usbctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 					allow_status_338x(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 					goto next_endpoints3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 				default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 				dev->wakeup_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 				writel(readl(&dev->usb->usbctl) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 					~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 					&dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 				allow_status_338x(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 			goto usb3_delegate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 			e = get_ep_by_addr(dev,	w_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 			if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 				goto do_stall3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 			if (w_value != USB_ENDPOINT_HALT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 				goto do_stall3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 			ep_vdbg(dev, "%s clear halt\n", e->ep.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 			 * Workaround for SS SeqNum not cleared via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 			 * Endpoint Halt (Clear) bit. select endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 			ep_clear_seqnum(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 			clear_halt(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 			if (!list_empty(&e->queue) && e->td_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 				restart_dma(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 			allow_status(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 			ep->stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 			goto usb3_delegate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	case USB_REQ_SET_FEATURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		switch (r.bRequestType) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 			if (!dev->addressed_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 				switch (w_value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 				case USB_DEVICE_U1_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 					dev->u1_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 					writel(readl(&dev->usb_ext->usbctl2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 						BIT(U1_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 						&dev->usb_ext->usbctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 					allow_status_338x(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 					goto next_endpoints3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 				case USB_DEVICE_U2_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 					dev->u2_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 					writel(readl(&dev->usb_ext->usbctl2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 						BIT(U2_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 						&dev->usb_ext->usbctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 					allow_status_338x(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 					goto next_endpoints3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 				case USB_DEVICE_LTM_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 					dev->ltm_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 					writel(readl(&dev->usb_ext->usbctl2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 						BIT(LTM_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 						&dev->usb_ext->usbctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 					allow_status_338x(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 					goto next_endpoints3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 				default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 				dev->wakeup_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 				writel(readl(&dev->usb->usbctl) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 					BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 					&dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 				allow_status_338x(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 			goto usb3_delegate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 			e = get_ep_by_addr(dev,	w_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 			if (!e || (w_value != USB_ENDPOINT_HALT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 				goto do_stall3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 			ep->stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 			if (ep->num == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 				ep->dev->protocol_stall = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 			else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 				if (ep->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 					abort_dma(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 				set_halt(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 			allow_status_338x(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 			goto usb3_delegate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) usb3_delegate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 		ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 				r.bRequestType, r.bRequest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 				w_value, w_index, w_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 				readl(&ep->cfg->ep_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 		ep->responded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 		spin_unlock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 		tmp = dev->driver->setup(&dev->gadget, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 		spin_lock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) do_stall3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	if (tmp < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 		ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 				r.bRequestType, r.bRequest, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 		dev->protocol_stall = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 		/* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 		set_halt(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) next_endpoints3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) #undef	w_value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) #undef	w_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) #undef	w_length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) static void usb338x_handle_ep_intr(struct net2280 *dev, u32 stat0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 	u32 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 	u32 bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	for (index = 0; index < ARRAY_SIZE(ep_bit); index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 		bit = BIT(ep_bit[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 		if (!stat0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		if (!(stat0 & bit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 		stat0 &= ~bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 		handle_ep_small(&dev->ep[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	u32			num, scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 	/* most of these don't need individual acks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	stat &= ~BIT(INTA_ASSERTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	if (!stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	/* ep_dbg(dev, "irqstat0 %04x\n", stat); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	/* starting a control request? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 	if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 		union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 			u32			raw[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 			struct usb_ctrlrequest	r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 		} u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 		int				tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 		struct net2280_request		*req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 			u32 val = readl(&dev->usb->usbstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 			if (val & BIT(SUPER_SPEED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 				dev->gadget.speed = USB_SPEED_SUPER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 						EP0_SS_MAX_PACKET_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 			} else if (val & BIT(HIGH_SPEED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 				dev->gadget.speed = USB_SPEED_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 						EP0_HS_MAX_PACKET_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 				dev->gadget.speed = USB_SPEED_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 						EP0_HS_MAX_PACKET_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 			net2280_led_speed(dev, dev->gadget.speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 			ep_dbg(dev, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 					usb_speed_string(dev->gadget.speed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 		ep = &dev->ep[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 		ep->irqs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		/* make sure any leftover request state is cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 		stat &= ~BIT(ENDPOINT_0_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 		while (!list_empty(&ep->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 			req = list_entry(ep->queue.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 					struct net2280_request, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 			done(ep, req, (req->req.actual == req->req.length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 						? 0 : -EPROTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 		ep->stopped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 		dev->protocol_stall = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 		if (!(dev->quirks & PLX_PCIE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 			if (ep->dev->quirks & PLX_2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 				tmp = BIT(FIFO_OVERFLOW) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 				    BIT(FIFO_UNDERFLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 				tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 			writel(tmp | BIT(TIMEOUT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 				   BIT(USB_STALL_SENT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 				   BIT(USB_IN_NAK_SENT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 				   BIT(USB_IN_ACK_RCVD) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 				   BIT(USB_OUT_PING_NAK_SENT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 				   BIT(USB_OUT_ACK_SENT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 				   BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 				   BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 				   BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 				   BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 				   BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 				   BIT(DATA_IN_TOKEN_INTERRUPT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 				   &ep->regs->ep_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 		u.raw[0] = readl(&dev->usb->setup0123);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		u.raw[1] = readl(&dev->usb->setup4567);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		cpu_to_le32s(&u.raw[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 		cpu_to_le32s(&u.raw[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 		if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 			defect7374_workaround(dev, u.r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 		tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) #define	w_value		le16_to_cpu(u.r.wValue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) #define	w_index		le16_to_cpu(u.r.wIndex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) #define	w_length	le16_to_cpu(u.r.wLength)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 		/* ack the irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 		stat ^= BIT(SETUP_PACKET_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 		/* watch control traffic at the token level, and force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		 * synchronization before letting the status stage happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 		 * FIXME ignore tokens we'll NAK, until driver responds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 		 * that'll mean a lot less irqs for some drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 		if (ep->is_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 			scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 				BIT(DATA_IN_TOKEN_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 			stop_out_naking(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 			scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 				BIT(DATA_IN_TOKEN_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 		writel(scratch, &dev->epregs[0].ep_irqenb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 		/* we made the hardware handle most lowlevel requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 		 * everything else goes uplevel to the gadget code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 		ep->responded = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 		if (dev->gadget.speed == USB_SPEED_SUPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 			handle_stat0_irqs_superspeed(dev, ep, u.r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 			goto next_endpoints;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 		switch (u.r.bRequest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 		case USB_REQ_GET_STATUS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 			struct net2280_ep	*e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 			__le32			status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 			/* hw handles device and interface status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 				goto delegate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 			e = get_ep_by_addr(dev, w_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 			if (!e || w_length > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 				goto do_stall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 			if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 				status = cpu_to_le32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 				status = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 			/* don't bother with a request object! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 			writel(0, &dev->epregs[0].ep_irqenb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 			set_fifo_bytecount(ep, w_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 			writel((__force u32)status, &dev->epregs[0].ep_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 			allow_status(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 			ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 			goto next_endpoints;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 		case USB_REQ_CLEAR_FEATURE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 			struct net2280_ep	*e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 			/* hw handles device features */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 				goto delegate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 			if (w_value != USB_ENDPOINT_HALT || w_length != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 				goto do_stall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 			e = get_ep_by_addr(dev, w_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 			if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 				goto do_stall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 			if (e->wedged) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 				ep_vdbg(dev, "%s wedged, halt not cleared\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 						ep->ep.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 				ep_vdbg(dev, "%s clear halt\n", e->ep.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 				clear_halt(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 				if ((ep->dev->quirks & PLX_PCIE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 					!list_empty(&e->queue) && e->td_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 						restart_dma(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 			allow_status(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 			goto next_endpoints;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 		case USB_REQ_SET_FEATURE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 			struct net2280_ep	*e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 			/* hw handles device features */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 				goto delegate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 			if (w_value != USB_ENDPOINT_HALT || w_length != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 				goto do_stall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 			e = get_ep_by_addr(dev, w_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 			if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 				goto do_stall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 			if (e->ep.name == ep0name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 				goto do_stall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 			set_halt(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 			if ((dev->quirks & PLX_PCIE) && e->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 				abort_dma(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 			allow_status(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 			ep_vdbg(dev, "%s set halt\n", ep->ep.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 			goto next_endpoints;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) delegate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 			ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 				"ep_cfg %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 				u.r.bRequestType, u.r.bRequest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 				w_value, w_index, w_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 				readl(&ep->cfg->ep_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 			ep->responded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 			spin_unlock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 			tmp = dev->driver->setup(&dev->gadget, &u.r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 			spin_lock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 		/* stall ep0 on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 		if (tmp < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) do_stall:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 			ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 					u.r.bRequestType, u.r.bRequest, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 			dev->protocol_stall = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 		/* some in/out token irq should follow; maybe stall then.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 		 * driver must queue a request (even zlp) or halt ep0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 		 * before the host times out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) #undef	w_value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) #undef	w_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) #undef	w_length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) next_endpoints:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 	if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 		u32 mask = (BIT(ENDPOINT_0_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 			USB3380_IRQSTAT0_EP_INTR_MASK_IN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 			USB3380_IRQSTAT0_EP_INTR_MASK_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 		if (stat & mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 			usb338x_handle_ep_intr(dev, stat & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 			stat &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 		/* endpoint data irq ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 		scratch = stat & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 		stat &= ~0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 		for (num = 0; scratch; num++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 			u32		t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 			/* do this endpoint's FIFO and queue need tending? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 			t = BIT(num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 			if ((scratch & t) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 			scratch ^= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 			ep = &dev->ep[num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 			handle_ep_small(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	if (stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 		ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) #define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 		BIT(DMA_C_INTERRUPT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 		BIT(DMA_B_INTERRUPT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 		BIT(DMA_A_INTERRUPT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) #define	PCI_ERROR_INTERRUPTS ( \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 		BIT(PCI_RETRY_ABORT_INTERRUPT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) __releases(dev->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) __acquires(dev->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 	struct net2280_ep	*ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 	u32			tmp, num, mask, scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 	/* after disconnect there's nothing else to do! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 	tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 	mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 	 * only indicates a change in the reset state).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 	if (stat & tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 		bool	reset = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 		bool	disconnect = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 		 * Ignore disconnects and resets if the speed hasn't been set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 		 * VBUS can bounce and there's always an initial reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 		writel(tmp, &dev->regs->irqstat1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 			if ((stat & BIT(VBUS_INTERRUPT)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 					(readl(&dev->usb->usbctl) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 						BIT(VBUS_PIN)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 				disconnect = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 				ep_dbg(dev, "disconnect %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 						dev->driver->driver.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 			} else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 					(readl(&dev->usb->usbstat) & mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 						== 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 				reset = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 				ep_dbg(dev, "reset %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 						dev->driver->driver.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 			if (disconnect || reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 				stop_activity(dev, dev->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 				ep0_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 				spin_unlock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 				if (reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 					usb_gadget_udc_reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 						(&dev->gadget, dev->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 					(dev->driver->disconnect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 						(&dev->gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 				spin_lock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 		stat &= ~tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 		/* vBUS can bounce ... one of many reasons to ignore the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 		 * notion of hotplug events on bus connect/disconnect!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 		if (!stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	/* NOTE: chip stays in PCI D0 state for now, but it could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 	 * enter D1 to save more power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 	tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 	if (stat & tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 		writel(tmp, &dev->regs->irqstat1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 		spin_unlock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 		if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 			if (dev->driver->suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 				dev->driver->suspend(&dev->gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 			if (!enable_suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 				stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 			if (dev->driver->resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 				dev->driver->resume(&dev->gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 			/* at high speed, note erratum 0133 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 		spin_lock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 		stat &= ~tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	/* clear any other status/irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	if (stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 		writel(stat, &dev->regs->irqstat1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	/* some status we can just ignore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	if (dev->quirks & PLX_2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 			  BIT(SUSPEND_REQUEST_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 			  BIT(RESUME_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 			  BIT(SOF_INTERRUPT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 			  BIT(RESUME_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 			  BIT(SOF_DOWN_INTERRUPT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 			  BIT(SOF_INTERRUPT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 	if (!stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 	/* ep_dbg(dev, "irqstat1 %08x\n", stat);*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 	/* DMA status, for ep-{a,b,c,d} */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	scratch = stat & DMA_INTERRUPTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	stat &= ~DMA_INTERRUPTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 	scratch >>= 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 	for (num = 0; scratch; num++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 		struct net2280_dma_regs	__iomem *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 		tmp = BIT(num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 		if ((tmp & scratch) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 		scratch ^= tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 		ep = &dev->ep[num + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 		dma = ep->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 		if (!dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 		/* clear ep's dma status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 		tmp = readl(&dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 		writel(tmp, &dma->dmastat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 		/* dma sync*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 		if (dev->quirks & PLX_PCIE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 			u32 r_dmacount = readl(&dma->dmacount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 			if (!ep->is_in &&  (r_dmacount & 0x00FFFFFF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 			    (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 		if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 			ep_dbg(ep->dev, "%s no xact done? %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 				ep->ep.name, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 		stop_dma(ep->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 		/* OUT transfers terminate when the data from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 		 * host is in our memory.  Process whatever's done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 		 * On this path, we know transfer's last packet wasn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 		 * less than req->length. NAK_OUT_PACKETS may be set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 		 * or the FIFO may already be holding new packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 		 * IN transfers can linger in the FIFO for a very
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 		 * long time ... we ignore that for now, accounting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 		 * precisely (like PIO does) needs per-packet irqs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 		scan_dma_completions(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 		/* disable dma on inactive queues; else maybe restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 		if (!list_empty(&ep->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 			tmp = readl(&dma->dmactl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 			restart_dma(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 		ep->irqs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	/* NOTE:  there are other PCI errors we might usefully notice.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	 * if they appear very often, here's where to try recovering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	if (stat & PCI_ERROR_INTERRUPTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 		ep_err(dev, "pci dma error; stat %08x\n", stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 		stat &= ~PCI_ERROR_INTERRUPTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 		/* these are fatal errors, but "maybe" they won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 		 * happen again ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 		stop_activity(dev, dev->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 		ep0_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 		stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	if (stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 		ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) static irqreturn_t net2280_irq(int irq, void *_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	struct net2280		*dev = _dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	/* shared interrupt, not ours */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 	if ((dev->quirks & PLX_LEGACY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 		(!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	spin_lock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	/* handle disconnect, dma, and more */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 	handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	/* control requests and PIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 	handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 	if (dev->quirks & PLX_PCIE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 		/* re-enable interrupt to trigger any possible new interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 		u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 		writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 		writel(pciirqenb1, &dev->regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	spin_unlock(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) static void gadget_release(struct device *_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 	struct net2280	*dev = container_of(_dev, struct net2280, gadget.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) /* tear down the binding between this driver and the pci device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) static void net2280_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	struct net2280		*dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	if (dev->added)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 		usb_del_gadget(&dev->gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	BUG_ON(dev->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	/* then clean up the resources we allocated during probe() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	if (dev->requests) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 		int		i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 		for (i = 1; i < 5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 			if (!dev->ep[i].dummy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 			dma_pool_free(dev->requests, dev->ep[i].dummy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 					dev->ep[i].td_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 		dma_pool_destroy(dev->requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	if (dev->got_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 		free_irq(pdev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 	if (dev->quirks & PLX_PCIE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 		pci_disable_msi(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 	if (dev->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 		net2280_led_shutdown(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 		iounmap(dev->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	if (dev->region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 		release_mem_region(pci_resource_start(pdev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 				pci_resource_len(pdev, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	if (dev->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 		pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 	device_remove_file(&pdev->dev, &dev_attr_registers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 	ep_info(dev, "unbind\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 	usb_put_gadget(&dev->gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) /* wrap this driver around the specified device, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610)  * don't respond over USB until a gadget driver binds to us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	struct net2280		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 	unsigned long		resource, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 	void			__iomem *base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 	int			retval, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 	/* alloc, and start init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	if (dev == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 		retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 	usb_initialize_gadget(&pdev->dev, &dev->gadget, gadget_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 	spin_lock_init(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	dev->quirks = id->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 	dev->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 	dev->gadget.ops = &net2280_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 				USB_SPEED_SUPER : USB_SPEED_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 	/* the "gadget" abstracts/virtualizes the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	dev->gadget.name = driver_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 	/* now all the pci goodies ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 	if (pci_enable_device(pdev) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 		retval = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 	dev->enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 	/* BAR 0 holds all the registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 	 * BAR 1 is 8051 memory; unused here (note erratum 0103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 	 * BAR 2 is fifo memory; unused here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	resource = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	len = pci_resource_len(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	if (!request_mem_region(resource, len, driver_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 		ep_dbg(dev, "controller already in use\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 		retval = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 	dev->region = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	/* FIXME provide firmware download interface to put
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 	 * 8051 code into the chip, e.g. to turn on PCI PM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 	base = ioremap(resource, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 	if (base == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 		ep_dbg(dev, "can't map memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 		retval = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	dev->regs = (struct net2280_regs __iomem *) base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 	dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 	if (dev->quirks & PLX_PCIE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 		u32 fsmvalue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 		u32 usbstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 		dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 							(base + 0x00b4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 		dev->llregs = (struct usb338x_ll_regs __iomem *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 							(base + 0x0700);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 		dev->plregs = (struct usb338x_pl_regs __iomem *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 							(base + 0x0800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 		usbstat = readl(&dev->usb->usbstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 		dev->enhanced_mode = !!(usbstat & BIT(11));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 		dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 		/* put into initial config, link up all endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 		fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 					(0xf << DEFECT7374_FSM_FIELD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 		/* See if firmware needs to set up for workaround: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 		if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 			dev->bug7734_patched = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 			writel(0, &dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 			dev->bug7734_patched = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 		dev->enhanced_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 		dev->n_ep = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 		/* put into initial config, link up all endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 		writel(0, &dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	usb_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	usb_reinit(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 	/* irq setup after old hardware is cleaned up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 	if (!pdev->irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 		ep_err(dev, "No IRQ.  Check PCI setup!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 		retval = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 	if (dev->quirks & PLX_PCIE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 		if (pci_enable_msi(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 			ep_err(dev, "Failed to enable MSI mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 							driver_name, dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 		ep_err(dev, "request interrupt %d failed\n", pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 		retval = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 	dev->got_irq = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 	/* DMA setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 	/* NOTE:  we know only the 32 LSBs of dma addresses may be nonzero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 	dev->requests = dma_pool_create("requests", &pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 		sizeof(struct net2280_dma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 		0 /* no alignment requirements */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 		0 /* or page-crossing issues */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 	if (!dev->requests) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 		ep_dbg(dev, "can't get request pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 		retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	for (i = 1; i < 5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 		struct net2280_dma	*td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 		td = dma_pool_alloc(dev->requests, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 				&dev->ep[i].td_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 		if (!td) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 			ep_dbg(dev, "can't get dummy %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 			retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 		td->dmacount = 0;	/* not VALID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 		td->dmadesc = td->dmaaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 		dev->ep[i].dummy = td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 	/* enable lower-overhead pci memory bursts during DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 	if (dev->quirks & PLX_LEGACY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 		writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 			 * 256 write retries may not be enough...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 			   BIT(PCI_RETRY_ABORT_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 			*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 			BIT(DMA_READ_MULTIPLE_ENABLE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 			BIT(DMA_READ_LINE_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 			&dev->pci->pcimstctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 	/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 	pci_try_set_mwi(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 	/* ... also flushes any posted pci writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 	dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 	/* done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 	ep_info(dev, "%s\n", driver_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 	ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 			pdev->irq, base, dev->chiprev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 	ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 		dev->enhanced_mode ? "enhanced mode" : "legacy mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 	retval = device_create_file(&pdev->dev, &dev_attr_registers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 	retval = usb_add_gadget(&dev->gadget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 	dev->added = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 	if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 		net2280_remove(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 		kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) /* make sure the board is quiescent; otherwise it will continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794)  * generating IRQs across the upcoming reboot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) static void net2280_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 	struct net2280		*dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 	/* disable IRQs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 	writel(0, &dev->regs->pciirqenb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 	writel(0, &dev->regs->pciirqenb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 	/* disable the pullup so the host will think we're gone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 	writel(0, &dev->usb->usbctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) static const struct pci_device_id pci_ids[] = { {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 	.class =	PCI_CLASS_SERIAL_USB_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	.class_mask =	~0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 	.vendor =	PCI_VENDOR_ID_PLX_LEGACY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 	.device =	0x2280,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 	.subvendor =	PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 	.subdevice =	PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 	.driver_data =	PLX_LEGACY | PLX_2280,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 	}, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 	.class =	PCI_CLASS_SERIAL_USB_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 	.class_mask =	~0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 	.vendor =	PCI_VENDOR_ID_PLX_LEGACY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 	.device =	0x2282,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 	.subvendor =	PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 	.subdevice =	PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 	.driver_data =	PLX_LEGACY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 	.class =	PCI_CLASS_SERIAL_USB_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 	.class_mask =	~0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 	.vendor =	PCI_VENDOR_ID_PLX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 	.device =	0x2380,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 	.subvendor =	PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 	.subdevice =	PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 	.driver_data =	PLX_PCIE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 	.class_mask =	~0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 	.vendor =	PCI_VENDOR_ID_PLX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 	.device =	0x3380,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 	.subvendor =	PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 	.subdevice =	PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 	.driver_data =	PLX_PCIE | PLX_SUPERSPEED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 	.class =	PCI_CLASS_SERIAL_USB_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 	.class_mask =	~0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 	.vendor =	PCI_VENDOR_ID_PLX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 	.device =	0x3382,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 	.subvendor =	PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 	.subdevice =	PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 	.driver_data =	PLX_PCIE | PLX_SUPERSPEED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) { /* end: all zeroes */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) MODULE_DEVICE_TABLE(pci, pci_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) /* pci driver glue; this is a "new style" PCI driver module */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) static struct pci_driver net2280_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 	.name =		driver_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 	.id_table =	pci_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 	.probe =	net2280_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 	.remove =	net2280_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 	.shutdown =	net2280_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 	/* FIXME add power management support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) module_pci_driver(net2280_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) MODULE_DESCRIPTION(DRIVER_DESC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) MODULE_AUTHOR("David Brownell");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) MODULE_LICENSE("GPL");