Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * This code is *strongly* based on EHCI-HCD code by David Brownell since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * the chip is a quasi-EHCI compatible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/dmapool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/usb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/usb/hcd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <asm/unaligned.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 
#define DRIVER_VERSION "0.0.50"

/*
 * OXU210HP chip-level (non-EHCI) register offsets and bit fields.
 * Indented #defines describe bits within the register defined just above.
 */
#define OXU_DEVICEID			0x00
	#define OXU_REV_MASK		0xffff0000	/* chip revision, bits 31:16 */
	#define OXU_REV_SHIFT		16
	#define OXU_REV_2100		0x2100
	#define OXU_BO_SHIFT		8
	#define OXU_BO_MASK		(0x3 << OXU_BO_SHIFT)
	#define OXU_MAJ_REV_SHIFT	4
	#define OXU_MAJ_REV_MASK	(0xf << OXU_MAJ_REV_SHIFT)
	#define OXU_MIN_REV_SHIFT	0
	#define OXU_MIN_REV_MASK	(0xf << OXU_MIN_REV_SHIFT)
#define OXU_HOSTIFCONFIG		0x04
#define OXU_SOFTRESET			0x08
	#define OXU_SRESET		(1 << 0)	/* write 1: soft-reset the chip */

#define OXU_PIOBURSTREADCTRL		0x0C

/* chip-level interrupt status / enable-set / enable-clear registers */
#define OXU_CHIPIRQSTATUS		0x10
#define OXU_CHIPIRQEN_SET		0x14
#define OXU_CHIPIRQEN_CLR		0x18
	#define OXU_USBSPHLPWUI		0x00000080
	#define OXU_USBOTGLPWUI		0x00000040
	#define OXU_USBSPHI		0x00000002	/* SPH (standalone host) irq */
	#define OXU_USBOTGI		0x00000001	/* OTG controller irq */

#define OXU_CLKCTRL_SET			0x1C
	#define OXU_SYSCLKEN		0x00000008
	#define OXU_USBSPHCLKEN		0x00000002
	#define OXU_USBOTGCLKEN		0x00000001

#define OXU_ASO				0x68
	#define OXU_SPHPOEN		0x00000100
	#define OXU_OVRCCURPUPDEN	0x00000800
	#define OXU_ASO_OP		(1 << 10)
	#define OXU_COMPARATOR		0x000004000	/* NOTE(review): 9 hex digits; value is 0x4000 (bit 14) — matches upstream */

#define OXU_USBMODE			0x1A8
	#define OXU_VBPS		0x00000020
	#define OXU_ES_LITTLE		0x00000000
	#define OXU_CM_HOST_ONLY	0x00000003
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78)  * Proper EHCI structs & defines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 
/* Magic numbers that can affect system performance */
#define EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */
#define EHCI_TUNE_RL_TT		0	/* no nak throttling for TT traffic */
#define EHCI_TUNE_MULT_HS	1	/* 1-3 transactions/uframe; 4.10.3 */
#define EHCI_TUNE_MULT_TT	1
#define EHCI_TUNE_FLS		2	/* (small) 256 frame schedule */

/* forward declaration; the full definition appears further down this file */
struct oxu_hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) /* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) /* Section 2.2 Host Controller Capability Registers */
/* Memory-mapped overlay of the (read-only) capability register block. */
struct ehci_caps {
	/* these fields are specified as 8 and 16 bit registers,
	 * but some hosts can't perform 8 or 16 bit PCI accesses.
	 */
	u32		hc_capbase;	/* CAPLENGTH + HCIVERSION - offset 0x0 */
#define HC_LENGTH(p)		(((p)>>00)&0x00ff)	/* bits 7:0 */
#define HC_VERSION(p)		(((p)>>16)&0xffff)	/* bits 31:16 */
	u32		hcs_params;     /* HCSPARAMS - offset 0x4 */
#define HCS_DEBUG_PORT(p)	(((p)>>20)&0xf)	/* bits 23:20, debug port? */
#define HCS_INDICATOR(p)	((p)&(1 << 16))	/* true: has port indicators */
#define HCS_N_CC(p)		(((p)>>12)&0xf)	/* bits 15:12, #companion HCs */
#define HCS_N_PCC(p)		(((p)>>8)&0xf)	/* bits 11:8, ports per CC */
#define HCS_PORTROUTED(p)	((p)&(1 << 7))	/* true: port routing */
#define HCS_PPC(p)		((p)&(1 << 4))	/* true: port power control */
#define HCS_N_PORTS(p)		(((p)>>0)&0xf)	/* bits 3:0, ports on HC */

	u32		hcc_params;      /* HCCPARAMS - offset 0x8 */
#define HCC_EXT_CAPS(p)		(((p)>>8)&0xff)	/* for pci extended caps */
#define HCC_ISOC_CACHE(p)       ((p)&(1 << 7))  /* true: can cache isoc frame */
#define HCC_ISOC_THRES(p)       (((p)>>4)&0x7)  /* bits 6:4, uframes cached */
#define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1))  /* true: periodic_size changes*/
#define HCC_64BIT_ADDR(p)       ((p)&(1))       /* true: can use 64-bit addr */
	u8		portroute[8];	 /* nibbles for routing - offset 0xC */
} __packed;	/* layout is hardware-defined; must not be padded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) /* Section 2.3 Host Controller Operational Registers */
/* Memory-mapped overlay of the operational register block. */
struct ehci_regs {
	/* USBCMD: offset 0x00 */
	u32		command;
/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
#define CMD_PARK	(1<<11)		/* enable "park" on async qh */
#define CMD_PARK_CNT(c)	(((c)>>8)&3)	/* how many transfers to park for */
#define CMD_LRESET	(1<<7)		/* partial reset (no ports, etc) */
#define CMD_IAAD	(1<<6)		/* "doorbell" interrupt async advance */
#define CMD_ASE		(1<<5)		/* async schedule enable */
#define CMD_PSE		(1<<4)		/* periodic schedule enable */
/* 3:2 is periodic frame list size */
#define CMD_RESET	(1<<1)		/* reset HC not bus */
#define CMD_RUN		(1<<0)		/* start/stop HC */

	/* USBSTS: offset 0x04 */
	u32		status;
#define STS_ASS		(1<<15)		/* Async Schedule Status */
#define STS_PSS		(1<<14)		/* Periodic Schedule Status */
#define STS_RECL	(1<<13)		/* Reclamation */
#define STS_HALT	(1<<12)		/* Not running (any reason) */
/* some bits reserved */
	/* these STS_* flags are also intr_enable bits (USBINTR) */
#define STS_IAA		(1<<5)		/* Interrupted on async advance */
#define STS_FATAL	(1<<4)		/* such as some PCI access errors */
#define STS_FLR		(1<<3)		/* frame list rolled over */
#define STS_PCD		(1<<2)		/* port change detect */
#define STS_ERR		(1<<1)		/* "error" completion (overflow, ...) */
#define STS_INT		(1<<0)		/* "normal" completion (short, ...) */

/* interrupt sources this driver enables/handles */
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)

	/* USBINTR: offset 0x08 */
	u32		intr_enable;

	/* FRINDEX: offset 0x0C */
	u32		frame_index;	/* current microframe number */
	/* CTRLDSSEGMENT: offset 0x10 */
	u32		segment;	/* address bits 63:32 if needed */
	/* PERIODICLISTBASE: offset 0x14 */
	u32		frame_list;	/* points to periodic list */
	/* ASYNCLISTADDR: offset 0x18 */
	u32		async_next;	/* address of next async queue head */

	u32		reserved[9];	/* pad to the 0x40 CONFIGFLAG offset */

	/* CONFIGFLAG: offset 0x40 */
	u32		configured_flag;
#define FLAG_CF		(1<<0)		/* true: we'll support "high speed" */

	/* PORTSC: offset 0x44 */
	u32		port_status[0];	/* up to N_PORTS; NOTE(review): GNU zero-length array, kernel now prefers a flexible array member */
/* 31:23 reserved */
#define PORT_WKOC_E	(1<<22)		/* wake on overcurrent (enable) */
#define PORT_WKDISC_E	(1<<21)		/* wake on disconnect (enable) */
#define PORT_WKCONN_E	(1<<20)		/* wake on connect (enable) */
/* 19:16 for port testing */
#define PORT_LED_OFF	(0<<14)
#define PORT_LED_AMBER	(1<<14)
#define PORT_LED_GREEN	(2<<14)
#define PORT_LED_MASK	(3<<14)
#define PORT_OWNER	(1<<13)		/* true: companion hc owns this port */
#define PORT_POWER	(1<<12)		/* true: has power (see PPC) */
#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10))	/* USB 1.1 device */
/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
/* 9 reserved */
#define PORT_RESET	(1<<8)		/* reset port */
#define PORT_SUSPEND	(1<<7)		/* suspend port */
#define PORT_RESUME	(1<<6)		/* resume it */
#define PORT_OCC	(1<<5)		/* over current change */
#define PORT_OC		(1<<4)		/* over current active */
#define PORT_PEC	(1<<3)		/* port enable change */
#define PORT_PE		(1<<2)		/* port enable */
#define PORT_CSC	(1<<1)		/* connect status change */
#define PORT_CONNECT	(1<<0)		/* device connected */
#define PORT_RWC_BITS   (PORT_CSC | PORT_PEC | PORT_OCC)	/* write-1-to-clear bits */
} __packed;	/* layout is hardware-defined; must not be padded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) /* Appendix C, Debug port ... intended for use with special "debug devices"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200)  * that can help if there's no serial console.  (nonstandard enumeration.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201)  */
/* Register overlay for the (optional) EHCI debug port; see Appendix C. */
struct ehci_dbg_port {
	u32	control;
#define DBGP_OWNER	(1<<30)		/* true: debug driver owns the port */
#define DBGP_ENABLED	(1<<28)
#define DBGP_DONE	(1<<16)		/* transaction complete (write 1 to clear) */
#define DBGP_INUSE	(1<<10)
#define DBGP_ERRCODE(x)	(((x)>>7)&0x07)
#	define DBGP_ERR_BAD	1
#	define DBGP_ERR_SIGNAL	2
#define DBGP_ERROR	(1<<6)
#define DBGP_GO		(1<<5)		/* start the transaction */
#define DBGP_OUT	(1<<4)		/* true: OUT/SETUP; false: IN */
#define DBGP_LEN(x)	(((x)>>0)&0x0f)	/* data length, 0-8 bytes */
	u32	pids;
#define DBGP_PID_GET(x)		(((x)>>16)&0xff)	/* received PID */
#define DBGP_PID_SET(data, tok)	(((data)<<8)|(tok))	/* data + token PIDs to send */
	u32	data03;		/* data bytes 0-3 */
	u32	data47;		/* data bytes 4-7 */
	u32	address;
#define DBGP_EPADDR(dev, ep)	(((dev)<<8)|(ep))	/* target device/endpoint */
} __packed;	/* layout is hardware-defined; must not be padded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 
/* wrap a qtd bus address as a little-endian hardware link pointer */
#define	QTD_NEXT(dma)	cpu_to_le32((u32)dma)

/*
 * EHCI Specification 0.95 Section 3.5
 * QTD: describe data transfer components (buffer, direction, ...)
 * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
 *
 * These are associated only with "QH" (Queue Head) structures,
 * used with control, bulk, and interrupt transfers.
 */
struct ehci_qtd {
	/* first part defined by EHCI spec; read/written by the controller */
	__le32			hw_next;		/* see EHCI 3.5.1 */
	__le32			hw_alt_next;		/* see EHCI 3.5.2 */
	__le32			hw_token;		/* see EHCI 3.5.3 */
#define	QTD_TOGGLE	(1 << 31)	/* data toggle */
#define	QTD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
#define	QTD_IOC		(1 << 15)	/* interrupt on complete */
#define	QTD_CERR(tok)	(((tok)>>10) & 0x3)
#define	QTD_PID(tok)	(((tok)>>8) & 0x3)
#define	QTD_STS_ACTIVE	(1 << 7)	/* HC may execute this */
#define	QTD_STS_HALT	(1 << 6)	/* halted on error */
#define	QTD_STS_DBE	(1 << 5)	/* data buffer error (in HC) */
#define	QTD_STS_BABBLE	(1 << 4)	/* device was babbling (qtd halted) */
#define	QTD_STS_XACT	(1 << 3)	/* device gave illegal response */
#define	QTD_STS_MMF	(1 << 2)	/* incomplete split transaction */
#define	QTD_STS_STS	(1 << 1)	/* split transaction state */
#define	QTD_STS_PING	(1 << 0)	/* issue PING? */
	__le32			hw_buf[5];		/* see EHCI 3.5.4 */
	__le32			hw_buf_hi[5];		/* Appendix B */

	/* the rest is HCD-private; never touched by the controller */
	dma_addr_t		qtd_dma;		/* qtd address */
	struct list_head	qtd_list;		/* sw qtd list */
	struct urb		*urb;			/* qtd's urb */
	size_t			length;			/* length of buffer */

	u32			qtd_buffer_len;
	void			*buffer;		/* on-chip bounce buffer */
	dma_addr_t		buffer_dma;
	void			*transfer_buffer;	/* original urb buffer */
	void			*transfer_dma;
} __aligned(32);	/* EHCI requires 32-byte alignment of qtds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 
/* mask NakCnt+T in qh->hw_alt_next */
#define QTD_MASK cpu_to_le32 (~0x1f)

/* a short read: IN token (PID == 1) finished with residual length */
#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)

/* Type tag from {qh, itd, sitd, fstn}->hw_next */
#define Q_NEXT_TYPE(dma) ((dma) & cpu_to_le32 (3 << 1))

/* values for that type tag */
#define Q_TYPE_QH	cpu_to_le32 (1 << 1)

/* next async queue entry, or pointer to interrupt/periodic QH */
#define	QH_NEXT(dma)	(cpu_to_le32(((u32)dma)&~0x01f)|Q_TYPE_QH)

/* for periodic/async schedules and qtd lists, mark end of list */
#define	EHCI_LIST_END	cpu_to_le32(1) /* "null pointer" to hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286)  * Entries in periodic shadow table are pointers to one of four kinds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287)  * of data structure.  That's dictated by the hardware; a type tag is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288)  * encoded in the low bits of the hardware's periodic schedule.  Use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289)  * Q_NEXT_TYPE to get the tag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291)  * For entries in the async schedule, the type tag always says "qh".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292)  */
/* software shadow of a hardware schedule entry; see comment block above */
union ehci_shadow {
	struct ehci_qh		*qh;		/* Q_TYPE_QH */
	__le32			*hw_next;	/* (all types) */
	void			*ptr;		/* generic access/NULL checks */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300)  * EHCI Specification 0.95 Section 3.6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301)  * QH: describes control/bulk/interrupt endpoints
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302)  * See Fig 3-7 "Queue Head Structure Layout".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304)  * These appear in both the async and (for interrupt) periodic schedules.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 
struct ehci_qh {
	/* first part defined by EHCI spec; read/written by the controller */
	__le32			hw_next;	 /* see EHCI 3.6.1 */
	__le32			hw_info1;	/* see EHCI 3.6.2 */
#define	QH_HEAD		0x00008000
	__le32			hw_info2;	/* see EHCI 3.6.2 */
#define	QH_SMASK	0x000000ff
#define	QH_CMASK	0x0000ff00
#define	QH_HUBADDR	0x007f0000
#define	QH_HUBPORT	0x3f800000
#define	QH_MULT		0xc0000000
	__le32			hw_current;	 /* qtd list - see EHCI 3.6.4 */

	/* qtd overlay (hardware parts of a struct ehci_qtd) */
	__le32			hw_qtd_next;
	__le32			hw_alt_next;
	__le32			hw_token;
	__le32			hw_buf[5];
	__le32			hw_buf_hi[5];

	/* the rest is HCD-private; never touched by the controller */
	dma_addr_t		qh_dma;		/* address of qh */
	union ehci_shadow	qh_next;	/* ptr to qh; or periodic */
	struct list_head	qtd_list;	/* sw qtd list */
	struct ehci_qtd		*dummy;		/* placeholder qtd kept at list end */
	struct ehci_qh		*reclaim;	/* next to reclaim */

	struct oxu_hcd		*oxu;		/* owning controller */
	struct kref		kref;		/* refcount; qh freed on last put */
	unsigned int		stamp;

	u8			qh_state;
#define	QH_STATE_LINKED		1		/* HC sees this */
#define	QH_STATE_UNLINK		2		/* HC may still see this */
#define	QH_STATE_IDLE		3		/* HC doesn't see this */
#define	QH_STATE_UNLINK_WAIT	4		/* LINKED and on reclaim q */
#define	QH_STATE_COMPLETING	5		/* don't touch token.HALT */

	/* periodic schedule info */
	u8			usecs;		/* intr bandwidth */
	u8			gap_uf;		/* uframes split/csplit gap */
	u8			c_usecs;	/* ... split completion bw */
	u16			tt_usecs;	/* tt downstream bandwidth */
	unsigned short		period;		/* polling interval */
	unsigned short		start;		/* where polling starts */
#define NO_FRAME ((unsigned short)~0)			/* pick new start */
	struct usb_device	*dev;		/* access to TT */
} __aligned(32);	/* EHCI requires 32-byte alignment of qhs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357)  * Proper OXU210HP structs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 
/* offsets of the two controllers (OTG and standalone host) within the chip */
#define OXU_OTG_CORE_OFFSET	0x00400
#define OXU_OTG_CAP_OFFSET	(OXU_OTG_CORE_OFFSET + 0x100)
#define OXU_SPH_CORE_OFFSET	0x00800
#define OXU_SPH_CAP_OFFSET	(OXU_SPH_CORE_OFFSET + 0x100)

/* base of each controller's on-chip memory region */
#define OXU_OTG_MEM		0xE000
#define OXU_SPH_MEM		0x16000

/* Only the number of elements and the element structure are specified here. */
/* 2 host controllers are enabled - total size <= 28 kbytes */
#define	DEFAULT_I_TDPS		1024
#define QHEAD_NUM		16
#define QTD_NUM			32
#define SITD_NUM		8
#define MURB_NUM		8

#define BUFFER_NUM		8
#define BUFFER_SIZE		512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 
/* per-device driver data: one hcd per on-chip controller (OTG and SPH) */
struct oxu_info {
	struct usb_hcd *hcd[2];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
/* one on-chip data buffer; alignment keeps buffers from straddling each other */
struct oxu_buf {
	u8			buffer[BUFFER_SIZE];
} __aligned(BUFFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 
/* layout of one controller's on-chip memory (accessed via __iomem) */
struct oxu_onchip_mem {
	struct oxu_buf		db_pool[BUFFER_NUM];	/* data buffers */

	u32			frame_list[DEFAULT_I_TDPS];	/* periodic frame list */
	struct ehci_qh		qh_pool[QHEAD_NUM];
	struct ehci_qtd		qtd_pool[QTD_NUM];
} __aligned(4 << 10);	/* 4 KiB aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 
#define	EHCI_MAX_ROOT_PORTS	15		/* see HCS_N_PORTS */

/* NOTE(review): appears to be a "mini-urb" used to split one large caller urb
 * into smaller chunks; 'main' points back to the original — confirm against
 * the enqueue/giveback paths later in this file.
 */
struct oxu_murb {
	struct urb		urb;	/* the chunk actually queued */
	struct urb		*main;	/* caller's original urb */
	u8			last;	/* nonzero: final chunk of 'main' */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) struct oxu_hcd {				/* one per controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	unsigned int		is_otg:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	u8			qh_used[QHEAD_NUM];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	u8			qtd_used[QTD_NUM];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	u8			db_used[BUFFER_NUM];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	u8			murb_used[MURB_NUM];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	struct oxu_onchip_mem	__iomem *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	spinlock_t		mem_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	struct timer_list	urb_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	struct ehci_caps __iomem *caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	struct ehci_regs __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	u32			hcs_params;	/* cached register copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	spinlock_t		lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	/* async schedule support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	struct ehci_qh		*async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	struct ehci_qh		*reclaim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	unsigned int		reclaim_ready:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	unsigned int		scanning:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	/* periodic schedule support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	unsigned int		periodic_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	__le32			*periodic;	/* hw periodic table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	dma_addr_t		periodic_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	unsigned int		i_thresh;	/* uframes HC might cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	union ehci_shadow	*pshadow;	/* mirror hw periodic table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	int			next_uframe;	/* scan periodic, start here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	unsigned int		periodic_sched;	/* periodic activity count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	/* per root hub port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	unsigned long		reset_done[EHCI_MAX_ROOT_PORTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	/* bit vectors (one bit per port) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	unsigned long		bus_suspended;	/* which ports were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 						 * already suspended at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 						 * start of a bus suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 						 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	unsigned long		companion_ports;/* which ports are dedicated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 						 * to the companion controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 						 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	struct timer_list	watchdog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	unsigned long		actions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	unsigned int		stamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	unsigned long		next_statechange;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	u32			command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	/* SILICON QUIRKS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	struct list_head	urb_list;	/* this is the head to urb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 						 * queue that didn't get enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 						 * resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 						 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	struct oxu_murb		*murb_pool;	/* murb per split big urb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	unsigned int		urb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	u8			sbrn;		/* packed release number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) #define EHCI_IAA_JIFFIES	(HZ/100)	/* arbitrary; ~10 msec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) #define EHCI_IO_JIFFIES		(HZ/10)		/* io watchdog > irq_thresh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) #define EHCI_ASYNC_JIFFIES      (HZ/20)		/* async idle timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) #define EHCI_SHRINK_JIFFIES     (HZ/200)	/* async qh unlink delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) enum ehci_timer_action {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	TIMER_IO_WATCHDOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	TIMER_IAA_WATCHDOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	TIMER_ASYNC_SHRINK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	TIMER_ASYNC_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479)  * Main defines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) #define oxu_dbg(oxu, fmt, args...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) #define oxu_err(oxu, fmt, args...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) #define oxu_info(oxu, fmt, args...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) #ifdef CONFIG_DYNAMIC_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) #define DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	return container_of((void *) oxu, struct usb_hcd, hcd_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	return (struct oxu_hcd *) (hcd->hcd_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504)  * Debug stuff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) #undef OXU_URB_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) #undef OXU_VERBOSE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) #ifdef OXU_VERBOSE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) #define oxu_vdbg			oxu_dbg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) #define oxu_vdbg(oxu, fmt, args...)	/* Nop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) static int __attribute__((__unused__))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		label, label[0] ? " " : "", status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		(status & STS_ASS) ? " Async" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		(status & STS_PSS) ? " Periodic" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		(status & STS_RECL) ? " Recl" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		(status & STS_HALT) ? " Halt" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		(status & STS_IAA) ? " IAA" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		(status & STS_FATAL) ? " FATAL" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		(status & STS_FLR) ? " FLR" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		(status & STS_PCD) ? " PCD" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		(status & STS_ERR) ? " ERR" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		(status & STS_INT) ? " INT" : ""
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) static int __attribute__((__unused__))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		label, label[0] ? " " : "", enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		(enable & STS_IAA) ? " IAA" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		(enable & STS_FATAL) ? " FATAL" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		(enable & STS_FLR) ? " FLR" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		(enable & STS_PCD) ? " PCD" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		(enable & STS_ERR) ? " ERR" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		(enable & STS_INT) ? " INT" : ""
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) static const char *const fls_strings[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551)     { "1024", "512", "256", "??" };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) static int dbg_command_buf(char *buf, unsigned len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 				const char *label, u32 command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	return scnprintf(buf, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		"%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		label, label[0] ? " " : "", command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		(command & CMD_PARK) ? "park" : "(park)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		CMD_PARK_CNT(command),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		(command >> 16) & 0x3f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 		(command & CMD_LRESET) ? " LReset" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		(command & CMD_IAAD) ? " IAAD" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		(command & CMD_ASE) ? " Async" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		(command & CMD_PSE) ? " Periodic" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 		fls_strings[(command >> 2) & 0x3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		(command & CMD_RESET) ? " Reset" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		(command & CMD_RUN) ? "RUN" : "HALT"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) static int dbg_port_buf(char *buf, unsigned len, const char *label,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 				int port, u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	char	*sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	/* signaling state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	switch (status & (3 << 10)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	case 0 << 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		sig = "se0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	case 1 << 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 		sig = "k";	/* low speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	case 2 << 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		sig = "j";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		sig = "?";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	return scnprintf(buf, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		"%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		label, label[0] ? " " : "", port, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		(status & PORT_POWER) ? " POWER" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		(status & PORT_OWNER) ? " OWNER" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 		sig,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		(status & PORT_RESET) ? " RESET" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		(status & PORT_SUSPEND) ? " SUSPEND" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		(status & PORT_RESUME) ? " RESUME" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		(status & PORT_OCC) ? " OCC" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		(status & PORT_OC) ? " OC" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		(status & PORT_PEC) ? " PEC" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		(status & PORT_PE) ? " PE" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		(status & PORT_CSC) ? " CSC" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		(status & PORT_CONNECT) ? " CONNECT" : ""
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	    );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) static inline int __attribute__((__unused__))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) static inline int __attribute__((__unused__))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) static inline int __attribute__((__unused__))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) static inline int __attribute__((__unused__))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) #endif /* DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) /* functions have the "wrong" filename when they're output... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) #define dbg_status(oxu, label, status) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	char _buf[80]; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	dbg_status_buf(_buf, sizeof _buf, label, status); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	oxu_dbg(oxu, "%s\n", _buf); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) #define dbg_cmd(oxu, label, command) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	char _buf[80]; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	dbg_command_buf(_buf, sizeof _buf, label, command); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	oxu_dbg(oxu, "%s\n", _buf); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) #define dbg_port(oxu, label, port, status) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	char _buf[80]; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	dbg_port_buf(_buf, sizeof _buf, label, port, status); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	oxu_dbg(oxu, "%s\n", _buf); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651)  * Module parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) /* Initial IRQ latency: faster than hw default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) static int log2_irq_thresh;			/* 0 to 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) module_param(log2_irq_thresh, int, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) /* Initial park setting: slower than hw default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) static unsigned park;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) module_param(park, uint, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) /* For flakey hardware, ignore overcurrent indicators */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) static bool ignore_oc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) module_param(ignore_oc, bool, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) static void ehci_work(struct oxu_hcd *oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) static int oxu_hub_control(struct usb_hcd *hcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 				u16 typeReq, u16 wValue, u16 wIndex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 				char *buf, u16 wLength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676)  * Local functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) /* Low level read/write registers functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) static inline u32 oxu_readl(void __iomem *base, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	return readl(base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) static inline void oxu_writel(void __iomem *base, u32 reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	writel(val, base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) static inline void timer_action_done(struct oxu_hcd *oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 					enum ehci_timer_action action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	clear_bit(action, &oxu->actions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) static inline void timer_action(struct oxu_hcd *oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 					enum ehci_timer_action action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (!test_and_set_bit(action, &oxu->actions)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		unsigned long t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		switch (action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		case TIMER_IAA_WATCHDOG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			t = EHCI_IAA_JIFFIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		case TIMER_IO_WATCHDOG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			t = EHCI_IO_JIFFIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		case TIMER_ASYNC_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 			t = EHCI_ASYNC_JIFFIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		case TIMER_ASYNC_SHRINK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			t = EHCI_SHRINK_JIFFIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		t += jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		/* all timings except IAA watchdog can be overridden.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		 * async queue SHRINK often precedes IAA.  while it's ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		 * to go OFF neither can matter, and afterwards the IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		 * watchdog stops unless there's still periodic traffic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		if (action != TIMER_IAA_WATCHDOG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 				&& t > oxu->watchdog.expires
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 				&& timer_pending(&oxu->watchdog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		mod_timer(&oxu->watchdog, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732)  * handshake - spin reading hc until handshake completes or fails
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733)  * @ptr: address of hc register to be read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734)  * @mask: bits to look at in result of read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735)  * @done: value of those bits when handshake succeeds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736)  * @usec: timeout in microseconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738)  * Returns negative errno, or zero on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740)  * Success happens when the "mask" bits have the specified value (hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741)  * handshake done).  There are two failure modes:  "usec" have passed (major
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742)  * hardware flakeout), or the register reads as all-ones (hardware removed).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744)  * That last failure should_only happen in cases like physical cardbus eject
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745)  * before driver shutdown. But it also seems to be caused by bugs in cardbus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  * bridge shutdown:  shutting down the bridge before the devices using it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 					u32 mask, u32 done, int usec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	u32 result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	ret = readl_poll_timeout_atomic(ptr, result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 					((result & mask) == done ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 					 result == U32_MAX),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 					1, usec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	if (result == U32_MAX)		/* card removed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) /* Force HC to halt state from unknown (EHCI spec section 2.3) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) static int ehci_halt(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	u32	temp = readl(&oxu->regs->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	/* disable any irqs left enabled by previous code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	writel(0, &oxu->regs->intr_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if ((temp & STS_HALT) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	temp = readl(&oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	temp &= ~CMD_RUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	writel(temp, &oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	return handshake(oxu, &oxu->regs->status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			  STS_HALT, STS_HALT, 16 * 125);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) /* Put TDI/ARC silicon into EHCI mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) static void tdi_reset(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	u32 __iomem *reg_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	tmp = readl(reg_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	tmp |= 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	writel(tmp, reg_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) /* Reset a non-running (STS_HALT == 1) controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) static int ehci_reset(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	int	retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	u32	command = readl(&oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	command |= CMD_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	dbg_cmd(oxu, "reset", command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	writel(command, &oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	oxu_to_hcd(oxu)->state = HC_STATE_HALT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	oxu->next_statechange = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	retval = handshake(oxu, &oxu->regs->command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			    CMD_RESET, 0, 250 * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	tdi_reset(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) /* Idle the controller (from running) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) static void ehci_quiesce(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	u32	temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	BUG_ON(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	/* wait for any schedule enables/disables to take effect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	temp = readl(&oxu->regs->command) << 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	temp &= STS_ASS | STS_PSS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 				temp, 16 * 125) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	/* then disable anything that's still active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	temp = readl(&oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	writel(temp, &oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	/* hardware can take 16 microframes to turn off ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 				0, 16 * 125) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) static int check_reset_complete(struct oxu_hcd *oxu, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 				u32 __iomem *status_reg, int port_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	if (!(port_status & PORT_CONNECT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		oxu->reset_done[index] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		return port_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	/* if reset finished and it's still not enabled -- handoff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	if (!(port_status & PORT_PE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 				index+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		return port_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		oxu_dbg(oxu, "port %d high speed\n", index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	return port_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) static void ehci_hub_descriptor(struct oxu_hcd *oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 				struct usb_hub_descriptor *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	int ports = HCS_N_PORTS(oxu->hcs_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	u16 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	desc->bDescriptorType = USB_DT_HUB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	desc->bPwrOn2PwrGood = 10;	/* oxu 1.0, 2.3.9 says 20ms max */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	desc->bHubContrCurrent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	desc->bNbrPorts = ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	temp = 1 + (ports / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	desc->bDescLength = 7 + 2 * temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	temp = HUB_CHAR_INDV_PORT_OCPM;	/* per-port overcurrent reporting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	if (HCS_PPC(oxu->hcs_params))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		temp |= HUB_CHAR_NO_LPSM; /* no power switching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) /* Allocate an OXU210HP on-chip memory data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  * An on-chip memory data buffer is required for each OXU210HP USB transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * Each transfer descriptor has one or more on-chip memory data buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  * Data buffers are allocated from a fix sized pool of data blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  * To minimise fragmentation and give reasonable memory utlisation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * data buffers are allocated with sizes the power of 2 multiples of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  * the block size, starting on an address a multiple of the allocated size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  * FIXME: callers of this function require a buffer to be allocated for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  * len=0. This is a waste of on-chip memory and should be fix. Then this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  * function should be changed to not allocate a buffer for len=0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	int n_blocks;	/* minium blocks needed to hold len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	int a_blocks;	/* blocks allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	/* Don't allocte bigger than supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (len > BUFFER_SIZE * BUFFER_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		oxu_err(oxu, "buffer too big (%d)\n", len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	spin_lock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	/* Number of blocks needed to hold len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	/* Round the number of blocks up to the power of 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	/* Find a suitable available data buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	for (i = 0; i < BUFFER_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			i += max(a_blocks, (int)oxu->db_used[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		/* Check all the required blocks are available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		for (j = 0; j < a_blocks; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			if (oxu->db_used[i + j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		if (j != a_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		/* Allocate blocks found! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		qtd->buffer = (void *) &oxu->mem->db_pool[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		qtd->buffer_dma = virt_to_phys(qtd->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		oxu->db_used[i] = a_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		spin_unlock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	/* Failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	spin_unlock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	spin_lock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 							 / BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	oxu->db_used[index] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	qtd->qtd_buffer_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	qtd->buffer_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	qtd->buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	spin_unlock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	memset(qtd, 0, sizeof *qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	qtd->qtd_dma = dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	qtd->hw_next = EHCI_LIST_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	qtd->hw_alt_next = EHCI_LIST_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	INIT_LIST_HEAD(&qtd->qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (qtd->buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		oxu_buf_free(oxu, qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	spin_lock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	index = qtd - &oxu->mem->qtd_pool[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	oxu->qtd_used[index] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	spin_unlock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	struct ehci_qtd *qtd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	spin_lock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	for (i = 0; i < QTD_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		if (!oxu->qtd_used[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	if (i < QTD_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		memset(qtd, 0, sizeof *qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		qtd->hw_next = EHCI_LIST_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		qtd->hw_alt_next = EHCI_LIST_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		INIT_LIST_HEAD(&qtd->qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		qtd->qtd_dma = virt_to_phys(qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		oxu->qtd_used[i] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	spin_unlock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	return qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	spin_lock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	index = qh - &oxu->mem->qh_pool[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	oxu->qh_used[index] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	spin_unlock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) static void qh_destroy(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	struct oxu_hcd *oxu = qh->oxu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	/* clean qtds first, and know this is not linked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		oxu_dbg(oxu, "unused qh not empty!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	if (qh->dummy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		oxu_qtd_free(oxu, qh->dummy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	oxu_qh_free(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	struct ehci_qh *qh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	spin_lock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	for (i = 0; i < QHEAD_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		if (!oxu->qh_used[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if (i < QHEAD_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		memset(qh, 0, sizeof *qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		kref_init(&qh->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		qh->oxu = oxu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		qh->qh_dma = virt_to_phys(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		INIT_LIST_HEAD(&qh->qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		/* dummy td enables safe urb queuing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		qh->dummy = ehci_qtd_alloc(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		if (qh->dummy == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			oxu_dbg(oxu, "no dummy td\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			oxu->qh_used[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			qh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		oxu->qh_used[i] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	spin_unlock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	return qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /* to share a qh (cpu threads, or hc) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	kref_get(&qh->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	return qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) static inline void qh_put(struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	kref_put(&qh->kref, qh_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	spin_lock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	index = murb - &oxu->murb_pool[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	oxu->murb_used[index] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	spin_unlock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	struct oxu_murb *murb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	spin_lock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	for (i = 0; i < MURB_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		if (!oxu->murb_used[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	if (i < MURB_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		murb = &(oxu->murb_pool)[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		oxu->murb_used[i] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	spin_unlock(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	return murb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /* The queue heads and transfer descriptors are managed from pools tied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  * to each of the "per device" structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  * This is the initialisation and cleanup code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static void ehci_mem_cleanup(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	kfree(oxu->murb_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	oxu->murb_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	if (oxu->async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		qh_put(oxu->async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	oxu->async = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	del_timer(&oxu->urb_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	oxu->periodic = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	/* shadow periodic table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	kfree(oxu->pshadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	oxu->pshadow = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) /* Remember to add cleanup code (above) if you add anything here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	for (i = 0; i < oxu->periodic_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		oxu->mem->frame_list[i] = EHCI_LIST_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	for (i = 0; i < QHEAD_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		oxu->qh_used[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	for (i = 0; i < QTD_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		oxu->qtd_used[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	if (!oxu->murb_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	for (i = 0; i < MURB_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		oxu->murb_used[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	oxu->async = oxu_qh_alloc(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	if (!oxu->async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	oxu->periodic = (__le32 *) &oxu->mem->frame_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	oxu->periodic_dma = virt_to_phys(oxu->periodic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	for (i = 0; i < oxu->periodic_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		oxu->periodic[i] = EHCI_LIST_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	/* software shadow of hardware table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (oxu->pshadow != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	oxu_dbg(oxu, "couldn't init memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	ehci_mem_cleanup(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /* Fill a qtd, returning how much of the buffer we were able to queue up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 				int token, int maxpacket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	int i, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	u64 addr = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	/* one buffer entry per 4K ... first might be short or unaligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	qtd->hw_buf[0] = cpu_to_le32((u32)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	if (likely(len < count))		/* ... iff needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		count = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		buf +=  0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		buf &= ~0x0fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		/* per-qtd limit: from 16K to 20K (best alignment) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		for (i = 1; count < len && i < 5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 			addr = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 			qtd->hw_buf[i] = cpu_to_le32((u32)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 			qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			buf += 0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			if ((count + 0x1000) < len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 				count += 0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 				count = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		/* short packets may only terminate transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		if (count != len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			count -= (count % maxpacket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	qtd->hw_token = cpu_to_le32((count << 16) | token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	qtd->length = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) static inline void qh_update(struct oxu_hcd *oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 				struct ehci_qh *qh, struct ehci_qtd *qtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	/* writes to an active overlay are unsafe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	BUG_ON(qh->qh_state != QH_STATE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	qh->hw_alt_next = EHCI_LIST_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	/* Except for control endpoints, we make hardware maintain data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	 * ever clear it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		unsigned	is_out, epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 			qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			usb_settoggle(qh->dev, epnum, is_out, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /* If it weren't for a common silicon quirk (writing the dummy into the qh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)  * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)  * recovery (including urb dequeue) would need software changes to a QH...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	struct ehci_qtd *qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	if (list_empty(&qh->qtd_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		qtd = qh->dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		qtd = list_entry(qh->qtd_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 				struct ehci_qtd, qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		/* first qtd may already be partially processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 			qtd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	if (qtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		qh_update(oxu, qh, qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 				size_t length, u32 token)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	/* count IN/OUT bytes, not SETUP (even short packets) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (likely(QTD_PID(token) != 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		urb->actual_length += length - QTD_LENGTH(token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	/* don't modify error codes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	if (unlikely(urb->status != -EINPROGRESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	/* force cleanup after short read; not always an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	if (unlikely(IS_SHORT_READ(token)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		urb->status = -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	/* serious "can't proceed" faults reported by the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	if (token & QTD_STS_HALT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		if (token & QTD_STS_BABBLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			/* FIXME "must" disable babbling device's port too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 			urb->status = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		} else if (token & QTD_STS_MMF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			/* fs/ls interrupt xfer missed the complete-split */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			urb->status = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		} else if (token & QTD_STS_DBE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			urb->status = (QTD_PID(token) == 1) /* IN ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 				? -ENOSR  /* hc couldn't read data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 				: -ECOMM; /* hc couldn't write data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		} else if (token & QTD_STS_XACT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			/* timeout, bad crc, wrong PID, etc; retried */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 			if (QTD_CERR(token))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 				urb->status = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 			else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 				oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 					urb->dev->devpath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 					usb_pipeendpoint(urb->pipe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 					usb_pipein(urb->pipe) ? "in" : "out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 				urb->status = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		/* CERR nonzero + no errors + halt --> stall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		} else if (QTD_CERR(token))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			urb->status = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		else	/* unknown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			urb->status = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 			usb_pipedevice(urb->pipe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 			usb_pipeendpoint(urb->pipe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			usb_pipein(urb->pipe) ? "in" : "out",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 			token, urb->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) __releases(oxu->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) __acquires(oxu->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	if (likely(urb->hcpriv != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		/* S-mask in a QH means it's an interrupt urb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			/* ... update hc-wide periodic stats (for usbfs) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		qh_put(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	urb->hcpriv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	switch (urb->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	case -EINPROGRESS:		/* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		urb->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	default:			/* fault */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	case -EREMOTEIO:		/* fault or normal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			urb->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	case -ECONNRESET:		/* canceled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	case -ENOENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) #ifdef OXU_URB_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		__func__, urb->dev->devpath, urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		usb_pipeendpoint(urb->pipe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		usb_pipein(urb->pipe) ? "in" : "out",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		urb->status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		urb->actual_length, urb->transfer_buffer_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	/* complete() can reenter this HCD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	spin_unlock(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	spin_lock(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) #define HALT_BIT cpu_to_le32(QTD_STS_HALT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) /* Process and free completed qtds for a qh, returning URBs to drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)  * Chases up to qh->hw_current.  Returns number of completions called,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)  * indicating how much "real" work we did.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	struct ehci_qtd *last = NULL, *end = qh->dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	struct ehci_qtd	*qtd, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	int stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	unsigned count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	int do_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	u8 state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	struct oxu_murb *murb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	if (unlikely(list_empty(&qh->qtd_list)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	/* completions (or tasks on other cpus) must never clobber HALT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	 * till we've gone through and cleaned everything up, even when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	 * they add urbs to this qh's queue or mark them for unlinking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	 * NOTE:  unlinking expects to be done in queue order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	state = qh->qh_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	qh->qh_state = QH_STATE_COMPLETING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	stopped = (state == QH_STATE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	/* remove de-activated QTDs from front of queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	 * after faults (including short reads), cleanup this urb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	 * then let the queue advance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	 * if queue is stopped, handles unlinks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		u32 token = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		urb = qtd->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		/* Clean up any state from previous QTD ...*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		if (last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 			if (likely(last->urb != urb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 				if (last->urb->complete == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 					murb = (struct oxu_murb *) last->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 					last->urb = murb->main;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 					if (murb->last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 						ehci_urb_done(oxu, last->urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 						count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 					oxu_murb_free(oxu, murb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 					ehci_urb_done(oxu, last->urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 					count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 			oxu_qtd_free(oxu, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 			last = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		/* ignore urbs submitted during completions we reported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		if (qtd == end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		/* hardware copies qtd out of qh overlay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		token = le32_to_cpu(qtd->hw_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		/* always clean up qtds the hc de-activated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		if ((token & QTD_STS_ACTIVE) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 			if ((token & QTD_STS_HALT) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 				stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 			/* magic dummy for some short reads; qh won't advance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			 * that silicon quirk can kick in with this dummy too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			} else if (IS_SHORT_READ(token) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 					!(qtd->hw_alt_next & EHCI_LIST_END)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 				stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 				goto halt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		/* stop scanning when we reach qtds the hc is using */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		} else if (likely(!stopped &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 				HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 			stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 			if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 				urb->status = -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			/* ignore active urbs unless some previous qtd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 			 * for the urb faulted (including short read) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 			 * its urb was canceled.  we may patch qh or qtds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			if (likely(urb->status == -EINPROGRESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 			/* issue status after short control reads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			if (unlikely(do_status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 					&& QTD_PID(token) == 0 /* OUT */) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 				do_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			/* token in overlay may be most current */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 			if (state == QH_STATE_IDLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 					&& cpu_to_le32(qtd->qtd_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 						== qh->hw_current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 				token = le32_to_cpu(qh->hw_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			/* force halt for unlinked or blocked qh, so we'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			 * patch the qh later and so that completions can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			 * activate it while we "know" it's stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 			if ((HALT_BIT & qh->hw_token) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) halt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 				qh->hw_token |= HALT_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 				wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		/* Remove it from the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		qtd_copy_status(oxu, urb->complete ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 					urb : ((struct oxu_murb *) urb)->main,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 				qtd->length, token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		if ((usb_pipein(qtd->urb->pipe)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 				(NULL != qtd->transfer_buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 			memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		do_status = (urb->status == -EREMOTEIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 				&& usb_pipecontrol(urb->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			last = list_entry(qtd->qtd_list.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 					struct ehci_qtd, qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 			last->hw_next = qtd->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		list_del(&qtd->qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		last = qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	/* last urb's completion might still need calling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	if (likely(last != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		if (last->urb->complete == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 			murb = (struct oxu_murb *) last->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 			last->urb = murb->main;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 			if (murb->last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 				ehci_urb_done(oxu, last->urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 				count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			oxu_murb_free(oxu, murb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 			ehci_urb_done(oxu, last->urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		oxu_qtd_free(oxu, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	/* restore original state; caller must unlink or relink */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	qh->qh_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	/* be sure the hardware's done with the qh before refreshing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	 * it after fault cleanup, or recovering from silicon wrongly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	 * overlaying the dummy qtd (which reduces DMA chatter).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		case QH_STATE_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 			qh_refresh(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		case QH_STATE_LINKED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 			/* should be rare for periodic transfers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 			 * except maybe high bandwidth ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 			if ((cpu_to_le32(QH_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 					& qh->hw_info2) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 				intr_deschedule(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 				(void) qh_schedule(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 				unlink_async(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		/* otherwise, unlink already started */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) /* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) #define hb_mult(wMaxPacketSize)		(1 + (((wMaxPacketSize) >> 11) & 0x03))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) /* ... and packet size, for any kind of endpoint descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) #define max_packet(wMaxPacketSize)	((wMaxPacketSize) & 0x07ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /* Reverse of qh_urb_transaction: free a list of TDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  * used for cleanup after errors, before HC sees an URB's TDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static void qtd_list_free(struct oxu_hcd *oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 				struct urb *urb, struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	struct ehci_qtd	*qtd, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	list_for_each_entry_safe(qtd, temp, head, qtd_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		list_del(&qtd->qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		oxu_qtd_free(oxu, qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /* Create a list of filled qtds for this URB; won't link into qh.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 						struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 						struct list_head *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 						gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	struct ehci_qtd	*qtd, *qtd_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	dma_addr_t buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	int len, maxpacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	int is_input;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	u32 token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	void *transfer_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	 * URBs map to sequences of QTDs: one logical transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	qtd = ehci_qtd_alloc(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	if (unlikely(!qtd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	list_add_tail(&qtd->qtd_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	qtd->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	token = QTD_STS_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	token |= (EHCI_TUNE_CERR << 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	/* for split transactions, SplitXState initialized to zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	len = urb->transfer_buffer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	is_input = usb_pipein(urb->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		urb->transfer_buffer = phys_to_virt(urb->transfer_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	if (usb_pipecontrol(urb->pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		/* SETUP pid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 				token | (2 /* "setup" */ << 8), 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		memcpy(qtd->buffer, qtd->urb->setup_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 				sizeof(struct usb_ctrlrequest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		/* ... and always at least one more pid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		token ^= QTD_TOGGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		qtd_prev = qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		qtd = ehci_qtd_alloc(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		if (unlikely(!qtd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		qtd->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		list_add_tail(&qtd->qtd_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		/* for zero length DATA stages, STATUS is always IN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 			token |= (1 /* "in" */ << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	 * Data transfer stage: buffer setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	ret = oxu_buf_alloc(oxu, qtd, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	buf = qtd->buffer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	transfer_buf = urb->transfer_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	if (!is_input)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	if (is_input)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		token |= (1 /* "in" */ << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	/* else it's already initted to "out" pid (0 << 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	 * buffer gets wrapped in one or more qtds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	 * last one may be "short" (including zero len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	 * and may serve as a control status ack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		int this_qtd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		qtd->transfer_buffer = transfer_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		len -= this_qtd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		buf += this_qtd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		transfer_buf += this_qtd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		if (is_input)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 			qtd->hw_alt_next = oxu->async->hw_alt_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		/* qh makes control packets use qtd toggle; maybe switch it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 			token ^= QTD_TOGGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		if (likely(len <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		qtd_prev = qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		qtd = ehci_qtd_alloc(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		if (unlikely(!qtd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		if (likely(len > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 			ret = oxu_buf_alloc(oxu, qtd, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 				goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		qtd->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		list_add_tail(&qtd->qtd_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	/* unless the bulk/interrupt caller wants a chance to clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	 * up after short reads, hc should advance qh past this urb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 				|| usb_pipecontrol(urb->pipe)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		qtd->hw_alt_next = EHCI_LIST_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	 * control requests may need a terminating data "status" ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	 * bulk ones may need a terminating short packet (zero length).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	if (likely(urb->transfer_buffer_length != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		int	one_more = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		if (usb_pipecontrol(urb->pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 			one_more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 			token ^= 0x0100;	/* "in" <--> "out"  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 			token |= QTD_TOGGLE;	/* force DATA1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		} else if (usb_pipebulk(urb->pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 				&& (urb->transfer_flags & URB_ZERO_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 				&& !(urb->transfer_buffer_length % maxpacket)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 			one_more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		if (one_more) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 			qtd_prev = qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 			qtd = ehci_qtd_alloc(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 			if (unlikely(!qtd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 				goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 			qtd->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 			qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 			list_add_tail(&qtd->qtd_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			/* never any data in such packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 			qtd_fill(qtd, 0, 0, token, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	/* by default, enable interrupt on urb completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	qtd->hw_token |= cpu_to_le32(QTD_IOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	return head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	qtd_list_free(oxu, urb, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /* Each QH holds a qtd list; a QH is used for everything except iso.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)  * For interrupt urbs, the scheduler must set the microframe scheduling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)  * mask(s) each time the QH gets scheduled.  For highspeed, that's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)  * just one microframe in the s-mask.  For split interrupt transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)  * there are additional complications: c-mask, maybe FSTNs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 				struct urb *urb, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	struct ehci_qh *qh = oxu_qh_alloc(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	u32 info1 = 0, info2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	int is_input, type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	int maxp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	if (!qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		return qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	 * init endpoint/device data for this QH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	info1 |= usb_pipeendpoint(urb->pipe) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	info1 |= usb_pipedevice(urb->pipe) << 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	is_input = usb_pipein(urb->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	type = usb_pipetype(urb->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	/* Compute interrupt scheduling parameters just once, and save.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	 * - allowing for high bandwidth, how many nsec/uframe are used?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	 * - split transactions need a second CSPLIT uframe; same question
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	 * - splits also need a schedule gap (for full/low speed I/O)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	 * - qh has a polling interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	 * For control/bulk requests, the HC or TT handles these.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	if (type == PIPE_INTERRUPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 								is_input, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 				hb_mult(maxp) * max_packet(maxp)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		qh->start = NO_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		if (urb->dev->speed == USB_SPEED_HIGH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 			qh->c_usecs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 			qh->gap_uf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 			qh->period = urb->interval >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 			if (qh->period == 0 && urb->interval != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 				/* NOTE interval 2 or 4 uframes could work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 				 * But interval 1 scheduling is simpler, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 				 * includes high bandwidth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 				oxu_dbg(oxu, "intr period %d uframes, NYET!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 					urb->interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 			struct usb_tt	*tt = urb->dev->tt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 			int		think_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 			/* gap is f(FS/LS transfer times) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 			qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 					is_input, 0, maxp) / (125 * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 			/* FIXME this just approximates SPLIT/CSPLIT times */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 			if (is_input) {		/* SPLIT, gap, CSPLIT+DATA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 				qh->c_usecs = qh->usecs + HS_USECS(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 				qh->usecs = HS_USECS(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 			} else {		/* SPLIT+DATA, gap, CSPLIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 				qh->usecs += HS_USECS(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 				qh->c_usecs = HS_USECS(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			think_time = tt ? tt->think_time : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 			qh->tt_usecs = NS_TO_US(think_time +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 					usb_calc_bus_time(urb->dev->speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 					is_input, 0, max_packet(maxp)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 			qh->period = urb->interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	/* support for tt scheduling, and access to toggles */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	qh->dev = urb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	/* using TT? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	switch (urb->dev->speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	case USB_SPEED_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		info1 |= (1 << 12);	/* EPS "low" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	case USB_SPEED_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		/* EPS 0 means "full" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		if (type != PIPE_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 			info1 |= (EHCI_TUNE_RL_TT << 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		if (type == PIPE_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			info1 |= (1 << 27);	/* for TT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 			info1 |= 1 << 14;	/* toggle from qtd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		info1 |= maxp << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		info2 |= (EHCI_TUNE_MULT_TT << 30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		info2 |= urb->dev->ttport << 23;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	case USB_SPEED_HIGH:		/* no TT involved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		info1 |= (2 << 12);	/* EPS "high" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		if (type == PIPE_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 			info1 |= (EHCI_TUNE_RL_HS << 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 			info1 |= 1 << 14;	/* toggle from qtd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			info2 |= (EHCI_TUNE_MULT_HS << 30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		} else if (type == PIPE_BULK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 			info1 |= (EHCI_TUNE_RL_HS << 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 			info2 |= (EHCI_TUNE_MULT_HS << 30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		} else {		/* PIPE_INTERRUPT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 			info1 |= max_packet(maxp) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 			info2 |= hb_mult(maxp) << 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		oxu_dbg(oxu, "bogus dev %p speed %d\n", urb->dev, urb->dev->speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		qh_put(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	/* init as live, toggle clear, advance to dummy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	qh->qh_state = QH_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	qh->hw_info1 = cpu_to_le32(info1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	qh->hw_info2 = cpu_to_le32(info2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	qh_refresh(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	return qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /* Move qh (and its qtds) onto async queue; maybe enable queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	__le32 dma = QH_NEXT(qh->qh_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	struct ehci_qh *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	/* (re)start the async schedule? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	head = oxu->async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	timer_action_done(oxu, TIMER_ASYNC_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	if (!head->qh_next.qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		u32	cmd = readl(&oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		if (!(cmd & CMD_ASE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			/* in case a clear of CMD_ASE didn't take yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			(void)handshake(oxu, &oxu->regs->status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 					STS_ASS, 0, 150);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 			cmd |= CMD_ASE | CMD_RUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 			writel(cmd, &oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 			oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 			/* posted write need not be known to HC yet ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	/* clear halt and/or toggle; and maybe recover from silicon quirk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	if (qh->qh_state == QH_STATE_IDLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		qh_refresh(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	/* splice right after start */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	qh->qh_next = head->qh_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	qh->hw_next = head->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	head->qh_next.qh = qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	head->hw_next = dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	qh->qh_state = QH_STATE_LINKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	/* qtd completions reported later by interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) #define	QH_ADDR_MASK	cpu_to_le32(0x7f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)  * For control/bulk/interrupt, return QH with these TDs appended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)  * Allocates and initializes the QH if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)  * Returns null if it can't allocate a QH it needs to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)  * If the QH has TDs (urbs) already, that's great.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 				struct urb *urb, struct list_head *qtd_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 				int epnum, void	**ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	struct ehci_qh *qh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	qh = (struct ehci_qh *) *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	if (unlikely(qh == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		/* can't sleep here, we have oxu->lock... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		qh = qh_make(oxu, urb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		*ptr = qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	if (likely(qh != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		struct ehci_qtd	*qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		if (unlikely(list_empty(qtd_list)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 			qtd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 			qtd = list_entry(qtd_list->next, struct ehci_qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 					qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		/* control qh may need patching ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		if (unlikely(epnum == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 			/* usb_reset_device() briefly reverts to address 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 			if (usb_pipedevice(urb->pipe) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 				qh->hw_info1 &= ~QH_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		/* just one way to queue requests: swap with the dummy qtd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		 * only hc or qh_refresh() ever modify the overlay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		if (likely(qtd != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 			struct ehci_qtd	*dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			dma_addr_t dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 			__le32 token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 			/* to avoid racing the HC, use the dummy td instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 			 * the first td of our list (becomes new dummy).  both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 			 * tds stay deactivated until we're done, when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 			 * HC is allowed to fetch the old dummy (4.10.2).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 			token = qtd->hw_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 			qtd->hw_token = HALT_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 			wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 			dummy = qh->dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 			dma = dummy->qtd_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 			*dummy = *qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 			dummy->qtd_dma = dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			list_del(&qtd->qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			list_add(&dummy->qtd_list, qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			list_splice(qtd_list, qh->qtd_list.prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 			ehci_qtd_init(qtd, qtd->qtd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 			qh->dummy = qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 			/* hc must see the new dummy at list end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 			dma = qtd->qtd_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 			qtd = list_entry(qh->qtd_list.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 					struct ehci_qtd, qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 			qtd->hw_next = QTD_NEXT(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 			/* let the hc process these next qtds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			dummy->hw_token = (token & ~(0x80));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 			wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 			dummy->hw_token = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 			urb->hcpriv = qh_get(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	return qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) static int submit_async(struct oxu_hcd	*oxu, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 			struct list_head *qtd_list, gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	int epnum = urb->ep->desc.bEndpointAddress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	struct ehci_qh *qh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) #ifdef OXU_URB_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	struct ehci_qtd	*qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		__func__, urb->dev->devpath, urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		urb->transfer_buffer_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		qtd, urb->ep->hcpriv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	spin_lock_irqsave(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		rc = -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	if (unlikely(qh == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	/* Control/bulk operations through TTs don't need scheduling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	 * the HC and TT handle it when the TT has a buffer ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	if (likely(qh->qh_state == QH_STATE_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		qh_link_async(oxu, qh_get(qh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	spin_unlock_irqrestore(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	if (unlikely(qh == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		qtd_list_free(oxu, urb, qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) /* The async qh for the qtds being reclaimed are now unlinked from the HC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) static void end_unlink_async(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	struct ehci_qh *qh = oxu->reclaim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	struct ehci_qh *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	timer_action_done(oxu, TIMER_IAA_WATCHDOG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	qh->qh_state = QH_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	qh->qh_next.qh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	qh_put(qh);			/* refcount from reclaim */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	next = qh->reclaim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	oxu->reclaim = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	oxu->reclaim_ready = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	qh->reclaim = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	qh_completions(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	if (!list_empty(&qh->qtd_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		qh_link_async(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		qh_put(qh);		/* refcount from async list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		/* it's not free to turn the async schedule on/off; leave it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		 * active but idle for a while once it empties.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 				&& oxu->async->qh_next.qh == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 			timer_action(oxu, TIMER_ASYNC_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	if (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		oxu->reclaim = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		start_unlink_async(oxu, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) /* makes sure the async qh will become idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /* caller must own oxu->lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	int cmd = readl(&oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	struct ehci_qh *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	assert_spin_locked(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	BUG_ON(oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 				&& qh->qh_state != QH_STATE_UNLINK_WAIT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	/* stop async schedule right now? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	if (unlikely(qh == oxu->async)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		/* can't get here without STS_ASS set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 				&& !oxu->reclaim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 			/* ... and CMD_IAAD clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 			writel(cmd & ~CMD_ASE, &oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 			wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 			/* handshake later, if we need to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 			timer_action_done(oxu, TIMER_ASYNC_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	qh->qh_state = QH_STATE_UNLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	oxu->reclaim = qh = qh_get(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	prev = oxu->async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	while (prev->qh_next.qh != qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 		prev = prev->qh_next.qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	prev->hw_next = qh->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	prev->qh_next = qh->qh_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		/* if (unlikely(qh->reclaim != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		 *	this will recurse, probably not much
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		end_unlink_async(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	oxu->reclaim_ready = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	cmd |= CMD_IAAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	writel(cmd, &oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	(void) readl(&oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	timer_action(oxu, TIMER_IAA_WATCHDOG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) static void scan_async(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	struct ehci_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	enum ehci_timer_action action = TIMER_IO_WATCHDOG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	if (!++(oxu->stamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		oxu->stamp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	timer_action_done(oxu, TIMER_ASYNC_SHRINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) rescan:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	qh = oxu->async->qh_next.qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	if (likely(qh != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 			/* clean any finished work for this qh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 			if (!list_empty(&qh->qtd_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 					&& qh->stamp != oxu->stamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 				int temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 				/* unlinks could happen here; completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 				 * reporting drops the lock.  rescan using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 				 * the latest schedule, but don't rescan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 				 * qhs we already finished (no looping).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 				qh = qh_get(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 				qh->stamp = oxu->stamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 				temp = qh_completions(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 				qh_put(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 				if (temp != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 					goto rescan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 			/* unlink idle entries, reducing HC PCI usage as well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 			 * as HCD schedule-scanning costs.  delay for any qh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 			 * we just scanned, there's a not-unusual case that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 			 * doesn't stay idle for long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 			 * (plus, avoids some kind of re-activation race.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 			if (list_empty(&qh->qtd_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 				if (qh->stamp == oxu->stamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 					action = TIMER_ASYNC_SHRINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 				else if (!oxu->reclaim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 					    && qh->qh_state == QH_STATE_LINKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 					start_unlink_async(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 			qh = qh->qh_next.qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		} while (qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	if (action == TIMER_ASYNC_SHRINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		timer_action(oxu, TIMER_ASYNC_SHRINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)  * periodic_next_shadow - return "next" pointer on shadow list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)  * @periodic: host pointer to qh/itd/sitd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)  * @tag: hardware tag for type of this record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 						__le32 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	switch (tag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	case Q_TYPE_QH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		return &periodic->qh->qh_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) /* caller must hold oxu->lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	union ehci_shadow *prev_p = &oxu->pshadow[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	__le32 *hw_p = &oxu->periodic[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	union ehci_shadow here = *prev_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	/* find predecessor of "ptr"; hw and shadow lists are in sync */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	while (here.ptr && here.ptr != ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		hw_p = here.hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		here = *prev_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	/* an interrupt entry (at list end) could have been shared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	if (!here.ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	/* update shadow and hardware lists ... the old "next" pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	 * from ptr may still be in use, the caller updates them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	*prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	*hw_p = *here.hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) /* how many of the uframe's 125 usecs are allocated? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) static unsigned short periodic_usecs(struct oxu_hcd *oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 					unsigned frame, unsigned uframe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	__le32 *hw_p = &oxu->periodic[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	union ehci_shadow *q = &oxu->pshadow[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	unsigned usecs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	while (q->ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		switch (Q_NEXT_TYPE(*hw_p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		case Q_TYPE_QH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 			/* is it in the S-mask? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 			if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 				usecs += q->qh->usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 			/* ... or C-mask? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 			if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 				usecs += q->qh->c_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 			hw_p = &q->qh->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 			q = &q->qh->qh_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	if (usecs > 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 						frame * 8 + uframe, usecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	return usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) static int enable_periodic(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	/* did clearing PSE did take effect yet?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	 * takes effect only at frame boundaries...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	if (status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		usb_hc_died(oxu_to_hcd(oxu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	cmd = readl(&oxu->regs->command) | CMD_PSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	writel(cmd, &oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	/* posted write ... PSS happens later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	/* make sure ehci_work scans these */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	oxu->next_uframe = readl(&oxu->regs->frame_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		% (oxu->periodic_size << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) static int disable_periodic(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	/* did setting PSE not take effect yet?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	 * takes effect only at frame boundaries...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	if (status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		usb_hc_died(oxu_to_hcd(oxu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	cmd = readl(&oxu->regs->command) & ~CMD_PSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	writel(cmd, &oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	/* posted write ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	oxu->next_uframe = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) /* periodic schedule slots have iso tds (normal or split) first, then a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)  * sparse tree for active interrupt transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)  * this just links in a qh; caller guarantees uframe masks are set right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)  * no FSTN support (yet; oxu 0.96+)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	unsigned period = qh->period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	dev_dbg(&qh->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 		"link qh%d-%04x/%p start %d [%d/%d us]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		qh, qh->start, qh->usecs, qh->c_usecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	/* high bandwidth, or otherwise every microframe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	if (period == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		period = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	for (i = qh->start; i < oxu->periodic_size; i += period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		union ehci_shadow	*prev = &oxu->pshadow[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		__le32			*hw_p = &oxu->periodic[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		union ehci_shadow	here = *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		__le32			type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		/* skip the iso nodes at list head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		while (here.ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 			type = Q_NEXT_TYPE(*hw_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 			if (type == Q_TYPE_QH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 			prev = periodic_next_shadow(prev, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 			hw_p = &here.qh->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 			here = *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		/* sorting each branch by period (slow-->fast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 		 * enables sharing interior tree nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 		while (here.ptr && qh != here.qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 			if (qh->period > here.qh->period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 			prev = &here.qh->qh_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 			hw_p = &here.qh->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 			here = *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		/* link in this qh, unless some earlier pass did that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		if (qh != here.qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 			qh->qh_next = here;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 			if (here.qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 				qh->hw_next = *hw_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 			wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 			prev->qh = qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 			*hw_p = QH_NEXT(qh->qh_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	qh->qh_state = QH_STATE_LINKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	qh_get(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	/* update per-qh bandwidth for usbfs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 		? ((qh->usecs + qh->c_usecs) / qh->period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		: (qh->usecs * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	/* maybe enable periodic schedule processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	if (!oxu->periodic_sched++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		return enable_periodic(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	unsigned period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	/* FIXME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	 *   IF this isn't high speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	 *   and this qh is active in the current uframe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	 *   (and overlay token SplitXstate is false?)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	 * THEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	 *   qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	/* high bandwidth, or otherwise part of every microframe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	period = qh->period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	if (period == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		period = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	for (i = qh->start; i < oxu->periodic_size; i += period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		periodic_unlink(oxu, i, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	/* update per-qh bandwidth for usbfs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		? ((qh->usecs + qh->c_usecs) / qh->period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		: (qh->usecs * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	dev_dbg(&qh->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		qh->period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		qh, qh->start, qh->usecs, qh->c_usecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	/* qh->qh_next still "live" to HC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	qh->qh_state = QH_STATE_UNLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	qh->qh_next.ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	qh_put(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	/* maybe turn off periodic schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	oxu->periodic_sched--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	if (!oxu->periodic_sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 		(void) disable_periodic(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	unsigned wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	qh_unlink_periodic(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	/* simple/paranoid:  always delay, expecting the HC needs to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	 * expect hub_wq to clean up after any CSPLITs we won't issue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	 * active high speed queues may need bigger delays...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	if (list_empty(&qh->qtd_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		|| (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		wait = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 		wait = 55;	/* worst case: 3 * 1024 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	udelay(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	qh->qh_state = QH_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	qh->hw_next = EHCI_LIST_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) static int check_period(struct oxu_hcd *oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 			unsigned frame, unsigned uframe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 			unsigned period, unsigned usecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	int claimed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	/* complete split running into next frame?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	 * given FSTN support, we could sometimes check...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	if (uframe >= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	 * 80% periodic == 100 usec/uframe available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	 * convert "usecs we need" to "max already claimed"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	usecs = 100 - usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	/* we "know" 2 and 4 uframe intervals were rejected; so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	 * for period 0, check _every_ microframe in the schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	if (unlikely(period == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 			for (uframe = 0; uframe < 7; uframe++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 				claimed = periodic_usecs(oxu, frame, uframe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 				if (claimed > usecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 					return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 		} while ((frame += 1) < oxu->periodic_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	/* just check the specified uframe, at that period */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 			claimed = periodic_usecs(oxu, frame, uframe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 			if (claimed > usecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 		} while ((frame += period) < oxu->periodic_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) static int check_intr_schedule(struct oxu_hcd	*oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 				unsigned frame, unsigned uframe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 				const struct ehci_qh *qh, __le32 *c_maskp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	int retval = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	if (!qh->c_usecs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 		*c_maskp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) /* "first fit" scheduling policy used the first time through,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)  * or when the previous schedule slot can't be re-used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	int		status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	unsigned	uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	__le32		c_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	qh_refresh(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	qh->hw_next = EHCI_LIST_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	frame = qh->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	/* reuse the previous schedule slots, if we can */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	if (frame < qh->period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 		status = check_intr_schedule(oxu, frame, --uframe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 				qh, &c_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 		uframe = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 		c_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		status = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	/* else scan the schedule to find a group of slots such that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	 * uframes have enough periodic bandwidth available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 		/* "normal" case, uframing flexible except with splits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 		if (qh->period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 			frame = qh->period - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 			do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 				for (uframe = 0; uframe < 8; uframe++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 					status = check_intr_schedule(oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 							frame, uframe, qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 							&c_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 					if (status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 			} while (status && frame--);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		/* qh->period == 0 means every uframe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 			frame = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 			status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 		if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 		qh->start = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 		/* reset S-frame and (maybe) C-frame masks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 		qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		qh->hw_info2 |= qh->period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 			? cpu_to_le32(1 << uframe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 			: cpu_to_le32(QH_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		qh->hw_info2 |= c_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 		oxu_dbg(oxu, "reused qh %p schedule\n", qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	/* stuff into the periodic schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	status = qh_link_periodic(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 			struct list_head *qtd_list, gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	unsigned epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	struct ehci_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	struct list_head	empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	/* get endpoint and transfer/schedule data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	epnum = urb->ep->desc.bEndpointAddress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	spin_lock_irqsave(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		status = -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	/* get qh and force any scheduling errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	INIT_LIST_HEAD(&empty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	if (qh == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	if (qh->qh_state == QH_STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		status = qh_schedule(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 		if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	/* then queue the urb's tds to the qh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	BUG_ON(qh == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	/* ... update usbfs periodic stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	spin_unlock_irqrestore(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		qtd_list_free(oxu, urb, qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 						gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	oxu_dbg(oxu, "iso support is missing!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 						gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	oxu_dbg(oxu, "split iso support is missing!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) static void scan_periodic(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	unsigned frame, clock, now_uframe, mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	unsigned modified;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	mod = oxu->periodic_size << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	 * When running, scan from last scan point up to "now"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	 * else clean up by scanning everything that's left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	 * Touches as few pages as possible:  cache-friendly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	now_uframe = oxu->next_uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 		clock = readl(&oxu->regs->frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 		clock = now_uframe + mod - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	clock %= mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		union ehci_shadow	q, *q_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		__le32			type, *hw_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		/* don't scan past the live uframe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		frame = now_uframe >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 		if (frame != (clock >> 3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 			/* safe to scan the whole frame at once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 			now_uframe |= 0x07;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 		/* scan each element in frame's queue for completions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		q_p = &oxu->pshadow[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		hw_p = &oxu->periodic[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		q.ptr = q_p->ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 		type = Q_NEXT_TYPE(*hw_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 		modified = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 		while (q.ptr != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 			union ehci_shadow temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 			switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 			case Q_TYPE_QH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 				/* handle any completions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 				temp.qh = qh_get(q.qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 				type = Q_NEXT_TYPE(q.qh->hw_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 				q = q.qh->qh_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 				modified = qh_completions(oxu, temp.qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 				if (unlikely(list_empty(&temp.qh->qtd_list)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 					intr_deschedule(oxu, temp.qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 				qh_put(temp.qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 				oxu_dbg(oxu, "corrupt type %d frame %d shadow %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 					type, frame, q.ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 				q.ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 			/* assume completion callbacks modify the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 			if (unlikely(modified))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 				goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		/* Stop when we catch up to the HC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		/* FIXME:  this assumes we won't get lapped when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 		 * latencies climb; that should be rare, but...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 		 * detect it, and just go all the way around.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 		 * FLR might help detect this case, so long as latencies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 		 * don't exceed periodic_size msec (default 1.024 sec).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 		/* FIXME: likewise assumes HC doesn't halt mid-scan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		if (now_uframe == clock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 			unsigned	now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 			if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 			oxu->next_uframe = now_uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 			now = readl(&oxu->regs->frame_index) % mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 			if (now_uframe == now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 			/* rescan the rest of this frame, then ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 			clock = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 			now_uframe++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 			now_uframe %= mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) /* On some systems, leaving remote wakeup enabled prevents system shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)  * The firmware seems to think that powering off is a wakeup event!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)  * This routine turns off remote wakeup and everything else, on all ports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 	int port = HCS_N_PORTS(oxu->hcs_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	while (port--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 		writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	unsigned port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	if (!HCS_PPC(oxu->hcs_params))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		if (is_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 			oxu_hub_control(oxu_to_hcd(oxu), SetPortFeature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 				USB_PORT_FEAT_POWER, port--, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 			oxu_hub_control(oxu_to_hcd(oxu), ClearPortFeature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 				USB_PORT_FEAT_POWER, port--, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) /* Called from some interrupts, timers, and so on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)  * It calls driver completion functions, after dropping oxu->lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) static void ehci_work(struct oxu_hcd *oxu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 	timer_action_done(oxu, TIMER_IO_WATCHDOG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	if (oxu->reclaim_ready)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 		end_unlink_async(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	/* another CPU may drop oxu->lock during a schedule scan while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	 * it reports urb completions.  this flag guards against bogus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 	 * attempts at re-entrant schedule scanning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	if (oxu->scanning)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	oxu->scanning = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	scan_async(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	if (oxu->next_uframe != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		scan_periodic(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	oxu->scanning = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	/* the IO watchdog guards against hardware or driver bugs that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	 * misplace IRQs, and should let us run completely without IRQs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	 * such lossage has been observed on both VT6202 and VT8235.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 			(oxu->async->qh_next.ptr != NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 			 oxu->periodic_sched != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 		timer_action(oxu, TIMER_IO_WATCHDOG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	/* if we need to use IAA and it's busy, defer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	if (qh->qh_state == QH_STATE_LINKED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 			&& oxu->reclaim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 		struct ehci_qh		*last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		for (last = oxu->reclaim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 				last->reclaim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 				last = last->reclaim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 		qh->qh_state = QH_STATE_UNLINK_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		last->reclaim = qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	/* bypass IAA if the hc can't care */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 	} else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 		end_unlink_async(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	/* something else might have unlinked the qh by now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	if (qh->qh_state == QH_STATE_LINKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 		start_unlink_async(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)  * USB host controller methods
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	u32 status, pcd_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	int bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	spin_lock(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	status = readl(&oxu->regs->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	/* e.g. cardbus physical eject */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	if (status == ~(u32) 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 		oxu_dbg(oxu, "device removed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 		goto dead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	/* Shared IRQ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	status &= INTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	if (!status || unlikely(hcd->state == HC_STATE_HALT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 		spin_unlock(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	/* clear (just) interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 	writel(status, &oxu->regs->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	readl(&oxu->regs->command);	/* unblock posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	bh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) #ifdef OXU_VERBOSE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	/* unrequested/ignored: Frame List Rollover */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	dbg_status(oxu, "irq", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	/* INT, ERR, and IAA interrupt rates can be throttled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	/* normal [4.15.1.2] or error [4.15.1.1] completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	if (likely((status & (STS_INT|STS_ERR)) != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		bh = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	/* complete the unlinking of some qh [4.15.2.3] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	if (status & STS_IAA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 		oxu->reclaim_ready = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 		bh = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	/* remote wakeup [4.3.1] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	if (status & STS_PCD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 		unsigned i = HCS_N_PORTS(oxu->hcs_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 		pcd_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 		/* resume root hub? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 		if (!(readl(&oxu->regs->command) & CMD_RUN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 			usb_hcd_resume_root_hub(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 		while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 			int pstatus = readl(&oxu->regs->port_status[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 			if (pstatus & PORT_OWNER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 			if (!(pstatus & PORT_RESUME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 					|| oxu->reset_done[i] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 			/* start USB_RESUME_TIMEOUT resume signaling from this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 			 * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 			 * stop that signaling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 			oxu->reset_done[i] = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 				msecs_to_jiffies(USB_RESUME_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 			oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 			mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	/* PCI errors [4.15.2.4] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	if (unlikely((status & STS_FATAL) != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 		/* bogus "fatal" IRQs appear on some chips... why?  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 		status = readl(&oxu->regs->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 		dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 		dbg_status(oxu, "fatal", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 		if (status & STS_HALT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 			oxu_err(oxu, "fatal error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) dead:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 			ehci_reset(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 			writel(0, &oxu->regs->configured_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 			usb_hc_died(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 			/* generic layer kills/unlinks all urbs, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 			 * uses oxu_stop to clean up the rest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 			bh = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	if (bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 		ehci_work(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	spin_unlock(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	if (pcd_status & STS_PCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 		usb_hcd_poll_rh_status(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) static irqreturn_t oxu_irq(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	int ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 	u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 	u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 	/* Disable all interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 		(!oxu->is_otg && (status & OXU_USBSPHI)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 		oxu210_hcd_irq(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	/* Enable all interrupt back */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 	oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) static void oxu_watchdog(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	struct oxu_hcd	*oxu = from_timer(oxu, t, watchdog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	spin_lock_irqsave(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 	/* lost IAA irqs wedge things badly; seen with a vt8235 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	if (oxu->reclaim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 		u32 status = readl(&oxu->regs->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 		if (status & STS_IAA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 			oxu_vdbg(oxu, "lost IAA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 			writel(STS_IAA, &oxu->regs->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 			oxu->reclaim_ready = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	/* stop async processing after it's idled a bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 		start_unlink_async(oxu, oxu->async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 	/* oxu could run by timer, without IRQs ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	ehci_work(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	spin_unlock_irqrestore(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) /* One-time init, only for memory state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) static int oxu_hcd_init(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 	u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	u32 hcc_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	spin_lock_init(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	timer_setup(&oxu->watchdog, oxu_watchdog, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	 * hw default: 1K periodic list heads, one per frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	 * periodic_size can shrink by USBCMD update if hcc_params allows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 	oxu->periodic_size = DEFAULT_I_TDPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	retval = ehci_mem_init(oxu, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	/* controllers may cache some of the periodic schedule ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	hcc_params = readl(&oxu->caps->hcc_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	if (HCC_ISOC_CACHE(hcc_params))		/* full frame cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 		oxu->i_thresh = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	else					/* N microframes cached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 		oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	oxu->reclaim = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 	oxu->reclaim_ready = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 	oxu->next_uframe = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	 * dedicate a qh for the async ring head, since we couldn't unlink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	 * a 'real' qh without stopping the async schedule [4.8].  use it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	 * as the 'reclamation list head' too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	 * from automatically advancing to the next td after short reads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	oxu->async->qh_next.qh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 	oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	oxu->async->hw_qtd_next = EHCI_LIST_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	oxu->async->qh_state = QH_STATE_LINKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	/* clear interrupt enables, set irq latency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 		log2_irq_thresh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	temp = 1 << (16 + log2_irq_thresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	if (HCC_CANPARK(hcc_params)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 		/* HW default park == 3, on hardware that supports it (like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 		 * NVidia and ALI silicon), maximizes throughput on the async
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 		 * schedule by avoiding QH fetches between transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 		 * With fast usb storage devices and NForce2, "park" seems to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 		 * make problems:  throughput reduction (!), data errors...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 		if (park) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 			park = min(park, (unsigned) 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 			temp |= CMD_PARK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 			temp |= park << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 		oxu_dbg(oxu, "park %d\n", park);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 		/* periodic schedule size can be smaller than default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 		temp &= ~(3 << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 		temp |= (EHCI_TUNE_FLS << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	oxu->command = temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) /* Called during probe() after chip reset completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) static int oxu_reset(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	spin_lock_init(&oxu->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	INIT_LIST_HEAD(&oxu->urb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 	oxu->urb_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	if (oxu->is_otg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 		oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 		oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 			HC_LENGTH(readl(&oxu->caps->hc_capbase));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 		oxu->mem = hcd->regs + OXU_SPH_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 		oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 		oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 			HC_LENGTH(readl(&oxu->caps->hc_capbase));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 		oxu->mem = hcd->regs + OXU_OTG_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	oxu->hcs_params = readl(&oxu->caps->hcs_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	oxu->sbrn = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	return oxu_hcd_init(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) static int oxu_run(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 	u32 temp, hcc_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	hcd->uses_new_polling = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	/* EHCI spec section 4.1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	retval = ehci_reset(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 	if (retval != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 		ehci_mem_cleanup(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 	writel(oxu->periodic_dma, &oxu->regs->frame_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	/* hcc_params controls whether oxu->regs->segment must (!!!)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	 * be used; it constrains QH/ITD/SITD and QTD locations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	 * dma_pool consistent memory always uses segment zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 	 * streaming mappings for I/O buffers, like pci_map_single(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 	 * can return segments above 4GB, if the device allows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	 * NOTE:  the dma mask is visible through dev->dma_mask, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	 * host side drivers though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	hcc_params = readl(&oxu->caps->hcc_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	if (HCC_64BIT_ADDR(hcc_params))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 		writel(0, &oxu->regs->segment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 				CMD_ASE | CMD_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	oxu->command |= CMD_RUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	writel(oxu->command, &oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	dbg_cmd(oxu, "init", oxu->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 	 * are explicitly handed to companion controller(s), so no TT is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 	 * involved with the root hub.  (Except where one is integrated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 	 * and there's no companion controller unless maybe for USB OTG.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 	hcd->state = HC_STATE_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	writel(FLAG_CF, &oxu->regs->configured_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	readl(&oxu->regs->command);	/* unblock posted writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 	oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 		((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		temp >> 8, temp & 0xff, DRIVER_VERSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 		ignore_oc ? ", overcurrent ignored" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) static void oxu_stop(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	/* Turn off port power on all root hub ports. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 	ehci_port_power(oxu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 	/* no more interrupts ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	del_timer_sync(&oxu->watchdog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 	spin_lock_irq(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	if (HC_IS_RUNNING(hcd->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 		ehci_quiesce(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 	ehci_reset(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 	writel(0, &oxu->regs->intr_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	spin_unlock_irq(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 	/* let companion controllers work when we aren't */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 	writel(0, &oxu->regs->configured_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	/* root hub is shut down separately (first, when possible) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	spin_lock_irq(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 	if (oxu->async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 		ehci_work(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	spin_unlock_irq(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 	ehci_mem_cleanup(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) /* Kick in for silicon on any bus (not just pci, etc).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204)  * This forcibly disables dma and IRQs, helping kexec and other cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)  * where the next system software may expect clean state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) static void oxu_shutdown(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	(void) ehci_halt(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	ehci_turn_off_all_ports(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	/* make BIOS/etc use companion controller during reboot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	writel(0, &oxu->regs->configured_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 	/* unblock posted writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	readl(&oxu->regs->configured_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) /* Non-error returns are a promise to giveback() the urb later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)  * we drop ownership so next owner (or urb unlink) can get it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)  * urb + dev is in hcd.self.controller.urb_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225)  * we're queueing TDs onto software and hardware lists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)  * hcd-specific init for hcpriv hasn't been done yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)  * NOTE:  control, bulk, and interrupt share the same code to append TDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)  * to a (possibly active) QH, and the same QH scanning code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 				gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	struct list_head qtd_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 	INIT_LIST_HEAD(&qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 	switch (usb_pipetype(urb->pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 	case PIPE_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	case PIPE_BULK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 		return submit_async(oxu, urb, &qtd_list, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 	case PIPE_INTERRUPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 		return intr_submit(oxu, urb, &qtd_list, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	case PIPE_ISOCHRONOUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 		if (urb->dev->speed == USB_SPEED_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 			return itd_submit(oxu, urb, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 			return sitd_submit(oxu, urb, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) /* This function is responsible for breaking URBs with big data size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262)  * into smaller size and processing small urbs in sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 				gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 	int num, rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	void *transfer_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 	struct urb *murb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 	/* If not bulk pipe just enqueue the URB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	if (!usb_pipebulk(urb->pipe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 		return __oxu_urb_enqueue(hcd, urb, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 	/* Otherwise we should verify the USB transfer buffer size! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	transfer_buffer = urb->transfer_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	num = urb->transfer_buffer_length / 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	rem = urb->transfer_buffer_length % 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	if (rem != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 		num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 	/* If URB is smaller than 4096 bytes just enqueue it! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	if (num == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 		return __oxu_urb_enqueue(hcd, urb, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	/* Ok, we have more job to do! :) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	for (i = 0; i < num - 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 		/* Get free micro URB poll till a free urb is received */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 			murb = (struct urb *) oxu_murb_alloc(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 			if (!murb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 				schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 		} while (!murb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 		/* Coping the urb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 		memcpy(murb, urb, sizeof(struct urb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 		murb->transfer_buffer_length = 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 		murb->transfer_buffer = transfer_buffer + i * 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 		/* Null pointer for the encodes that this is a micro urb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 		murb->complete = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 		((struct oxu_murb *) murb)->main = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 		((struct oxu_murb *) murb)->last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 		/* This loop is to guarantee urb to be processed when there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 		 * not enough resources at a particular time by retrying.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 			ret  = __oxu_urb_enqueue(hcd, murb, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 				schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 		} while (ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	/* Last urb requires special handling  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 	/* Get free micro URB poll till a free urb is received */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 		murb = (struct urb *) oxu_murb_alloc(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 		if (!murb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	} while (!murb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 	/* Coping the urb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	memcpy(murb, urb, sizeof(struct urb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 	murb->transfer_buffer_length = rem > 0 ? rem : 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 	/* Null pointer for the encodes that this is a micro urb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	murb->complete = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 	((struct oxu_murb *) murb)->main = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 	((struct oxu_murb *) murb)->last = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 		ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 	} while (ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) /* Remove from hardware lists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353)  * Completions normally happen asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 	struct ehci_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	spin_lock_irqsave(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 	switch (usb_pipetype(urb->pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	case PIPE_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	case PIPE_BULK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		qh = (struct ehci_qh *) urb->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 		if (!qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 		unlink_async(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 	case PIPE_INTERRUPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 		qh = (struct ehci_qh *) urb->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 		if (!qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 		switch (qh->qh_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 		case QH_STATE_LINKED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 			intr_deschedule(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 		case QH_STATE_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 			qh_completions(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 			oxu_dbg(oxu, "bogus qh %p state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 					qh, qh->qh_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 		/* reschedule QH iff another request is queued */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 		if (!list_empty(&qh->qtd_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 				&& HC_IS_RUNNING(hcd->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 			int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 			status = qh_schedule(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 			spin_unlock_irqrestore(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 			if (status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 				/* shouldn't happen often, but ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 				 * FIXME kill those tds' urbs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 				dev_err(hcd->self.controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 					"can't reschedule qh %p, err %d\n", qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 					status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 			return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 	spin_unlock_irqrestore(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) /* Bulk qh holds the data toggle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) static void oxu_endpoint_disable(struct usb_hcd *hcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 					struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 	struct ehci_qh		*qh, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 	/* ASSERT:  any requests/urbs are being unlinked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 	/* ASSERT:  nobody can be submitting urbs for this any more */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) rescan:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	spin_lock_irqsave(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 	qh = ep->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 	if (!qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	/* endpoints can be iso streams.  for now, we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 	 * accelerate iso completions ... so spin a while.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	if (qh->hw_info1 == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 		oxu_vdbg(oxu, "iso delay\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 		goto idle_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	if (!HC_IS_RUNNING(hcd->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 		qh->qh_state = QH_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	switch (qh->qh_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 	case QH_STATE_LINKED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 		for (tmp = oxu->async->qh_next.qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 				tmp && tmp != qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 				tmp = tmp->qh_next.qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 		/* periodic qh self-unlinks on empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 		if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 			goto nogood;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 		unlink_async(oxu, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	case QH_STATE_UNLINK:		/* wait for hw to finish? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) idle_timeout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 		spin_unlock_irqrestore(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 		schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 		goto rescan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	case QH_STATE_IDLE:		/* fully unlinked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 		if (list_empty(&qh->qtd_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 			qh_put(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) nogood:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 		/* caller was supposed to have unlinked any requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 		 * that's not our job.  just leak this memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 		oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 			qh, ep->desc.bEndpointAddress, qh->qh_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 			list_empty(&qh->qtd_list) ? "" : "(has tds)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	ep->hcpriv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	spin_unlock_irqrestore(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) static int oxu_get_frame(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	return (readl(&oxu->regs->frame_index) >> 3) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 		oxu->periodic_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) /* Build "status change" packet (one or two bytes) from HC registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	u32 temp, mask, status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 	int ports, i, retval = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 	/* if !PM, root hub timers won't get shut down ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	if (!HC_IS_RUNNING(hcd->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	/* init status to no-changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	buf[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	ports = HCS_N_PORTS(oxu->hcs_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 	if (ports > 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 		buf[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 		retval++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	/* Some boards (mostly VIA?) report bogus overcurrent indications,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 	 * causing massive log spam unless we completely ignore them.  It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 	 * PORT_POWER; that's surprising, but maybe within-spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	if (!ignore_oc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 		mask = PORT_CSC | PORT_PEC | PORT_OCC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 		mask = PORT_CSC | PORT_PEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 	/* no hub change reports (bit 0) for now (power, ...) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 	/* port N changes (bit N)? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 	spin_lock_irqsave(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	for (i = 0; i < ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 		temp = readl(&oxu->regs->port_status[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 		 * Return status information even for ports with OWNER set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 		 * Otherwise hub_wq wouldn't see the disconnect event when a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 		 * high-speed device is switched over to the companion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 		 * controller by the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 		if (!(temp & PORT_CONNECT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 			oxu->reset_done[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 		if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 				time_after_eq(jiffies, oxu->reset_done[i]))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 			if (i < 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 				buf[0] |= 1 << (i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 				buf[1] |= 1 << (i - 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 			status = STS_PCD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 	/* FIXME autosuspend idle root hubs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 	spin_unlock_irqrestore(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	return status ? retval : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) /* Returns the speed of a device attached to a port on the root hub. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 						unsigned int portsc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 	switch ((portsc >> 26) & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 		return USB_PORT_STAT_LOW_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 		return USB_PORT_STAT_HIGH_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) #define	PORT_WAKE_BITS	(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 				u16 wValue, u16 wIndex, char *buf, u16 wLength)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 	int ports = HCS_N_PORTS(oxu->hcs_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 	u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	u32 temp, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	unsigned long	flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 	int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	unsigned selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	 * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	 * (track current state ourselves) ... blink for diagnostics,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	 * power, "this is the one", etc.  EHCI spec supports this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	spin_lock_irqsave(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	switch (typeReq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	case ClearHubFeature:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 		switch (wValue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 		case C_HUB_LOCAL_POWER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 		case C_HUB_OVER_CURRENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 			/* no hub-wide feature/status flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	case ClearPortFeature:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 		if (!wIndex || wIndex > ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 		wIndex--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 		temp = readl(status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 		 * Even if OWNER is set, so the port is owned by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 		 * companion controller, hub_wq needs to be able to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 		 * the port-change status bits (especially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 		 * USB_PORT_STAT_C_CONNECTION).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 		switch (wValue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 		case USB_PORT_FEAT_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 			writel(temp & ~PORT_PE, status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 		case USB_PORT_FEAT_C_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 			writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 		case USB_PORT_FEAT_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 			if (temp & PORT_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 				goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 			if (temp & PORT_SUSPEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 				if ((temp & PORT_PE) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 					goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 				/* resume signaling for 20 msec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 				temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 				writel(temp | PORT_RESUME, status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 				oxu->reset_done[wIndex] = jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 						+ msecs_to_jiffies(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 		case USB_PORT_FEAT_C_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 			/* we auto-clear this feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 		case USB_PORT_FEAT_POWER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 			if (HCS_PPC(oxu->hcs_params))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 				writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 					  status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 		case USB_PORT_FEAT_C_CONNECTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 			writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 		case USB_PORT_FEAT_C_OVER_CURRENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 			writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 		case USB_PORT_FEAT_C_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 			/* GetPortStatus clears reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 		readl(&oxu->regs->command);	/* unblock posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 	case GetHubDescriptor:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 		ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 			buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	case GetHubStatus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 		/* no hub-wide feature/status flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 		memset(buf, 0, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 	case GetPortStatus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 		if (!wIndex || wIndex > ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 		wIndex--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 		status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 		temp = readl(status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 		/* wPortChange bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 		if (temp & PORT_CSC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 			status |= USB_PORT_STAT_C_CONNECTION << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 		if (temp & PORT_PEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 			status |= USB_PORT_STAT_C_ENABLE << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 		if ((temp & PORT_OCC) && !ignore_oc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 		/* whoever resumes must GetPortStatus to complete it!! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 		if (temp & PORT_RESUME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 			/* Remote Wakeup received? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 			if (!oxu->reset_done[wIndex]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 				/* resume signaling for 20 msec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 				oxu->reset_done[wIndex] = jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 						+ msecs_to_jiffies(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 				/* check the port again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 				mod_timer(&oxu_to_hcd(oxu)->rh_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 						oxu->reset_done[wIndex]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 			/* resume completed? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 			else if (time_after_eq(jiffies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 					oxu->reset_done[wIndex])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 				status |= USB_PORT_STAT_C_SUSPEND << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 				oxu->reset_done[wIndex] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 				/* stop resume signaling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 				temp = readl(status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 				writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 					status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 				retval = handshake(oxu, status_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 					   PORT_RESUME, 0, 2000 /* 2msec */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 				if (retval != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 					oxu_err(oxu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 						"port %d resume error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 						wIndex + 1, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 					goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 		/* whoever resets must GetPortStatus to complete it!! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 		if ((temp & PORT_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 				&& time_after_eq(jiffies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 					oxu->reset_done[wIndex])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 			status |= USB_PORT_STAT_C_RESET << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 			oxu->reset_done[wIndex] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 			/* force reset to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 			writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 					status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 			/* REVISIT:  some hardware needs 550+ usec to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 			 * this bit; seems too long to spin routinely...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 			retval = handshake(oxu, status_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 					PORT_RESET, 0, 750);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 			if (retval != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 				oxu_err(oxu, "port %d reset error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 					wIndex + 1, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 				goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 			/* see what we found out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 			temp = check_reset_complete(oxu, wIndex, status_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 					readl(status_reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 		/* transfer dedicated ports to the companion hc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 		if ((temp & PORT_CONNECT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 				test_bit(wIndex, &oxu->companion_ports)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 			temp &= ~PORT_RWC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 			temp |= PORT_OWNER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 			writel(temp, status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 			oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 			temp = readl(status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 		 * Even if OWNER is set, there's no harm letting hub_wq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 		 * see the wPortStatus values (they should all be 0 except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 		 * for PORT_POWER anyway).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 		if (temp & PORT_CONNECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 			status |= USB_PORT_STAT_CONNECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 			/* status may be from integrated TT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 			status |= oxu_port_speed(oxu, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 		if (temp & PORT_PE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 			status |= USB_PORT_STAT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 		if (temp & (PORT_SUSPEND|PORT_RESUME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 			status |= USB_PORT_STAT_SUSPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 		if (temp & PORT_OC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 			status |= USB_PORT_STAT_OVERCURRENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 		if (temp & PORT_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 			status |= USB_PORT_STAT_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 		if (temp & PORT_POWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 			status |= USB_PORT_STAT_POWER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) #ifndef	OXU_VERBOSE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 	if (status & ~0xffff)	/* only if wPortChange is interesting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 		dbg_port(oxu, "GetStatus", wIndex + 1, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 	case SetHubFeature:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 		switch (wValue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 		case C_HUB_LOCAL_POWER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 		case C_HUB_OVER_CURRENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 			/* no hub-wide feature/status flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 	case SetPortFeature:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 		selector = wIndex >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 		wIndex &= 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 		if (!wIndex || wIndex > ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 		wIndex--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 		temp = readl(status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 		if (temp & PORT_OWNER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 		temp &= ~PORT_RWC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 		switch (wValue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 		case USB_PORT_FEAT_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 			if ((temp & PORT_PE) == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 					|| (temp & PORT_RESET) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 				goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 			if (device_may_wakeup(&hcd->self.root_hub->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 				temp |= PORT_WAKE_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 			writel(temp | PORT_SUSPEND, status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 		case USB_PORT_FEAT_POWER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 			if (HCS_PPC(oxu->hcs_params))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 				writel(temp | PORT_POWER, status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 		case USB_PORT_FEAT_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 			if (temp & PORT_RESUME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 				goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 			/* line status bits may report this as low speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 			 * which can be fine if this root hub has a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 			 * transaction translator built in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 			oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 			temp |= PORT_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 			temp &= ~PORT_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 			 * caller must wait, then call GetPortStatus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 			 * usb 2.0 spec says 50 ms resets on root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 			oxu->reset_done[wIndex] = jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 					+ msecs_to_jiffies(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 			writel(temp, status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 		/* For downstream facing ports (these):  one hub port is put
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 		 * into test mode according to USB2 11.24.2.13, then the hub
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 		 * must be reset (which for root hub now means rmmod+modprobe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 		 * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 		 * about the EHCI-specific stuff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 		case USB_PORT_FEAT_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 			if (!selector || selector > 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 				goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 			ehci_quiesce(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 			ehci_halt(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 			temp |= selector << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 			writel(temp, status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 		readl(&oxu->regs->command);	/* unblock posted writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 		/* "stall" on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 		retval = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 	spin_unlock_irqrestore(&oxu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) static int oxu_bus_suspend(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 	int port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 	int mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 	oxu_dbg(oxu, "suspend root hub\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 	if (time_before(jiffies, oxu->next_statechange))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 		msleep(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 	port = HCS_N_PORTS(oxu->hcs_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 	spin_lock_irq(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 	/* stop schedules, clean any completed work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 	if (HC_IS_RUNNING(hcd->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 		ehci_quiesce(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 		hcd->state = HC_STATE_QUIESCING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 	oxu->command = readl(&oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 	if (oxu->reclaim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 		oxu->reclaim_ready = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 	ehci_work(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 	/* Unlike other USB host controller types, EHCI doesn't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 	 * any notion of "global" or bus-wide suspend.  The driver has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 	 * to manually suspend all the active unsuspended ports, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 	 * then manually resume them in the bus_resume() routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 	oxu->bus_suspended = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 	while (port--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 		u32 __iomem *reg = &oxu->regs->port_status[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 		u32 t1 = readl(reg) & ~PORT_RWC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 		u32 t2 = t1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 		/* keep track of which ports we suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 		if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 				!(t1 & PORT_SUSPEND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 			t2 |= PORT_SUSPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 			set_bit(port, &oxu->bus_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 		/* enable remote wakeup on all ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 		if (device_may_wakeup(&hcd->self.root_hub->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 			t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 			t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 		if (t1 != t2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 			oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 				port + 1, t1, t2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 			writel(t2, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 	/* turn off now-idle HC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 	del_timer_sync(&oxu->watchdog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 	ehci_halt(oxu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 	hcd->state = HC_STATE_SUSPENDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 	/* allow remote wakeup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 	mask = INTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 	if (!device_may_wakeup(&hcd->self.root_hub->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 		mask &= ~STS_PCD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 	writel(mask, &oxu->regs->intr_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 	readl(&oxu->regs->intr_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 	oxu->next_statechange = jiffies + msecs_to_jiffies(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 	spin_unlock_irq(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) /* Caller has locked the root hub, and should reset/reinit on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) static int oxu_bus_resume(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 	u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 	if (time_before(jiffies, oxu->next_statechange))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 		msleep(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 	spin_lock_irq(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 	/* Ideally and we've got a real resume here, and no port's power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 	 * was lost.  (For PCI, that means Vaux was maintained.)  But we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 	 * could instead be restoring a swsusp snapshot -- so that BIOS was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 	 * the last user of the controller, not reset/pm hardware keeping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 	 * state we gave to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 	temp = readl(&oxu->regs->intr_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 	oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 	/* at least some APM implementations will try to deliver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 	 * IRQs right away, so delay them until we're ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 	writel(0, &oxu->regs->intr_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 	/* re-init operational registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 	writel(0, &oxu->regs->segment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 	writel(oxu->periodic_dma, &oxu->regs->frame_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	/* restore CMD_RUN, framelist size, and irq threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 	writel(oxu->command, &oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 	/* Some controller/firmware combinations need a delay during which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 	 * they set up the port statuses.  See Bugzilla #8190. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 	mdelay(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 	/* manually resume the ports we suspended during bus_suspend() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 	i = HCS_N_PORTS(oxu->hcs_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 		temp = readl(&oxu->regs->port_status[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 		temp &= ~(PORT_RWC_BITS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 			| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 			temp |= PORT_RESUME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 		writel(temp, &oxu->regs->port_status[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 	i = HCS_N_PORTS(oxu->hcs_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 	mdelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 	while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 		temp = readl(&oxu->regs->port_status[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 			temp &= ~(PORT_RWC_BITS | PORT_RESUME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 			writel(temp, &oxu->regs->port_status[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 			oxu_vdbg(oxu, "resumed port %d\n", i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 	(void) readl(&oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 	/* maybe re-activate the schedule(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 	temp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 	if (oxu->async->qh_next.qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 		temp |= CMD_ASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 	if (oxu->periodic_sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 		temp |= CMD_PSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 	if (temp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 		oxu->command |= temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 		writel(oxu->command, &oxu->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 	oxu->next_statechange = jiffies + msecs_to_jiffies(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 	hcd->state = HC_STATE_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 	/* Now we can safely re-enable irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 	writel(INTR_MASK, &oxu->regs->intr_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 	spin_unlock_irq(&oxu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) static int oxu_bus_suspend(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) static int oxu_bus_resume(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) #endif	/* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) static const struct hc_driver oxu_hc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 	.description =		"oxu210hp_hcd",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 	.product_desc =		"oxu210hp HCD",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 	.hcd_priv_size =	sizeof(struct oxu_hcd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 	 * Generic hardware linkage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 	.irq =			oxu_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 	.flags =		HCD_MEMORY | HCD_USB2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 	 * Basic lifecycle operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 	.reset =		oxu_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 	.start =		oxu_run,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 	.stop =			oxu_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 	.shutdown =		oxu_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 	 * Managing i/o requests and associated device resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 	.urb_enqueue =		oxu_urb_enqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 	.urb_dequeue =		oxu_urb_dequeue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 	.endpoint_disable =	oxu_endpoint_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 	 * Scheduling support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 	.get_frame_number =	oxu_get_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 	 * Root hub support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	.hub_status_data =	oxu_hub_status_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 	.hub_control =		oxu_hub_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 	.bus_suspend =		oxu_bus_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 	.bus_resume =		oxu_bus_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065)  * Module stuff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) static void oxu_configuration(struct platform_device *pdev, void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 	/* Initialize top level registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 	 * First write ever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 	oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 	tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 	oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 	oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 					OXU_COMPARATOR | OXU_ASO_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 	tmp = oxu_readl(base, OXU_CLKCTRL_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 	oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 	/* Clear all top interrupt enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 	oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 	/* Clear all top interrupt status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 	oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 	/* Enable all needed top interrupt except OTG SPH core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 	oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) static int oxu_verify_id(struct platform_device *pdev, void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 	u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 	static const char * const bo[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 		"reserved",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 		"128-pin LQFP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 		"84-pin TFBGA",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 		"reserved",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 	/* Read controller signature register to find a match */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 	id = oxu_readl(base, OXU_DEVICEID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 	dev_info(&pdev->dev, "device ID %x\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 	if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 	dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 		id >> OXU_REV_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 		bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 		(id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 		(id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) static const struct hc_driver oxu_hc_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) static struct usb_hcd *oxu_create(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 				unsigned long memstart, unsigned long memlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 				void __iomem *base, int irq, int otg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 	struct usb_hcd *hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 	struct oxu_hcd *oxu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 	/* Set endian mode and host mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 	oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 				OXU_USBMODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 				OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 	hcd = usb_create_hcd(&oxu_hc_driver, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 				otg ? "oxu210hp_otg" : "oxu210hp_sph");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 	if (!hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 	hcd->rsrc_start = memstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 	hcd->rsrc_len = memlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	hcd->regs = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 	hcd->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 	hcd->state = HC_STATE_HALT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 	oxu = hcd_to_oxu(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 	oxu->is_otg = otg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 		usb_put_hcd(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 	device_wakeup_enable(hcd->self.controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	return hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) static int oxu_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 				unsigned long memstart, unsigned long memlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 				void __iomem *base, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 	struct oxu_info *info = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 	struct usb_hcd *hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 	/* First time configuration at start up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 	oxu_configuration(pdev, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 	ret = oxu_verify_id(pdev, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 		dev_err(&pdev->dev, "no devices found!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 	/* Create the OTG controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 	hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 	if (IS_ERR(hcd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 		dev_err(&pdev->dev, "cannot create OTG controller!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 		ret = PTR_ERR(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 		goto error_create_otg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 	info->hcd[0] = hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 	/* Create the SPH host controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 	hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 	if (IS_ERR(hcd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 		dev_err(&pdev->dev, "cannot create SPH controller!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 		ret = PTR_ERR(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 		goto error_create_sph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 	info->hcd[1] = hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 	oxu_writel(base, OXU_CHIPIRQEN_SET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 		oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) error_create_sph:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 	usb_remove_hcd(info->hcd[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 	usb_put_hcd(info->hcd[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) error_create_otg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) static int oxu_drv_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 	unsigned long memstart, memlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 	int irq, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 	struct oxu_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 	if (usb_disabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 	 * Get the platform resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 	if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 			"no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 	irq = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 	dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 	base = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 	if (IS_ERR(base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 		ret = PTR_ERR(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 	memstart = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 	memlen = resource_size(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 	ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 		dev_err(&pdev->dev, "error setting irq type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) 		ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) 	/* Allocate a driver data struct to hold useful info for both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) 	 * SPH & OTG devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 	info = devm_kzalloc(&pdev->dev, sizeof(struct oxu_info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 	if (!info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 		ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 	platform_set_drvdata(pdev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 	ret = oxu_init(pdev, memstart, memlen, base, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 		dev_dbg(&pdev->dev, "cannot init USB devices\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 	dev_info(&pdev->dev, "devices enabled and running\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 	platform_set_drvdata(pdev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 	usb_remove_hcd(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 	usb_put_hcd(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) static int oxu_drv_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 	struct oxu_info *info = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 	oxu_remove(pdev, info->hcd[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 	oxu_remove(pdev, info->hcd[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) static void oxu_drv_shutdown(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 	oxu_drv_remove(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) /* FIXME: TODO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) static int oxu_drv_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 	struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 	struct usb_hcd *hcd = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) static int oxu_drv_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 	struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 	struct usb_hcd *hcd = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) #define oxu_drv_suspend	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) #define oxu_drv_resume	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) static struct platform_driver oxu_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 	.probe		= oxu_drv_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 	.remove		= oxu_drv_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 	.shutdown	= oxu_drv_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 	.suspend	= oxu_drv_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 	.resume		= oxu_drv_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 		.name = "oxu210hp-hcd",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 		.bus = &platform_bus_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) module_platform_driver(oxu_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) MODULE_LICENSE("GPL");