Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags   |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (c) 2001-2004 by David Brownell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) /* this file is part of ehci-hcd.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  * EHCI scheduled transaction support:  interrupt, iso, split iso
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * These are called "periodic" transactions in the EHCI spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * Note that for interrupt transfers, the QH/QTD manipulation is shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * with the "asynchronous" transaction support (control/bulk transfers).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * The only real difference is in how interrupt transfers are scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  * For ISO, we make an "iso_stream" head to serve the same role as a QH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  * It keeps track of every ITD (or SITD) that's linked, and holds enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  * pre-calculated schedule data to make appending to the queue be quick.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) static int ehci_get_frame(struct usb_hcd *hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  * periodic_next_shadow - return "next" pointer on shadow list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * @periodic: host pointer to qh/itd/sitd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  * @tag: hardware tag for type of this record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) static union ehci_shadow *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) 		__hc32 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 	switch (hc32_to_cpu(ehci, tag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 	case Q_TYPE_QH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 		return &periodic->qh->qh_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 	case Q_TYPE_FSTN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 		return &periodic->fstn->fstn_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 	case Q_TYPE_ITD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 		return &periodic->itd->itd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 	/* case Q_TYPE_SITD: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 		return &periodic->sitd->sitd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) static __hc32 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 		__hc32 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 	switch (hc32_to_cpu(ehci, tag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 	/* our ehci_shadow.qh is actually software part */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 	case Q_TYPE_QH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 		return &periodic->qh->hw->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	/* others are hw parts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 		return periodic->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
/*
 * Unlink the entry "ptr" from the periodic schedule slot "frame",
 * keeping the software shadow list and the hardware list in step.
 * The entry's own next pointers are left alone; the caller fixes
 * them up afterwards.
 *
 * caller must hold ehci->lock
 */
static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		/* order matters: both lookups consume the type tag of the
		 * current link (*hw_p) before hw_p itself advances
		 */
		prev_p = periodic_next_shadow(ehci, prev_p,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));

	/* If the removed entry was the last one, the hardware slot would
	 * normally get the list-end marker.  With use_dummy_qh set, point
	 * the controller at the dummy QH instead -- NOTE(review): presumably
	 * a workaround for controllers that mishandle empty-slot markers;
	 * confirm against where use_dummy_qh is set.
	 */
	if (!ehci->use_dummy_qh ||
	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
			!= EHCI_LIST_END(ehci))
		*hw_p = *shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
	else
		*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) /* Bandwidth and TT management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
/*
 * Find the TT data structure for this device; create it if necessary.
 *
 * Returns NULL when the device is not below a TT, a valid ehci_tt on
 * success, or ERR_PTR(-ENOMEM) on allocation failure.  Allocations use
 * GFP_ATOMIC -- NOTE(review): presumably because callers hold
 * ehci->lock; confirm at the call sites.
 */
static struct ehci_tt *find_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	unsigned		port;
	bool			allocated_index = false;

	if (!utt)
		return NULL;		/* Not below a TT */

	/*
	 * Find/create our data structure.
	 * For hubs with a single TT, we get it directly.
	 * For hubs with multiple TTs, there's an extra level of pointers.
	 */
	tt_index = NULL;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		if (!tt_index) {		/* Create the index array */
			tt_index = kcalloc(utt->hub->maxchild,
					   sizeof(*tt_index),
					   GFP_ATOMIC);
			if (!tt_index)
				return ERR_PTR(-ENOMEM);
			utt->hcpriv = tt_index;
			/* remember so we can roll back on later failure */
			allocated_index = true;
		}
		port = udev->ttport - 1;
		ptt = &tt_index[port];
	} else {
		port = 0;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt) {				/* Create the ehci_tt */
		struct ehci_hcd		*ehci =
				hcd_to_ehci(bus_to_hcd(udev->bus));

		tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
		if (!tt) {
			/* undo the index-array allocation made above so a
			 * failed call leaves utt->hcpriv as it found it
			 */
			if (allocated_index) {
				utt->hcpriv = NULL;
				kfree(tt_index);
			}
			return ERR_PTR(-ENOMEM);
		}
		list_add_tail(&tt->tt_list, &ehci->tt_list);
		INIT_LIST_HEAD(&tt->ps_list);
		tt->usb_tt = utt;
		tt->tt_port = port;
		*ptt = tt;
	}

	return tt;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 
/*
 * Release the TT above udev, if it's not in use.
 *
 * Frees the per-port ehci_tt when no endpoints reference it any more
 * (its ps_list is empty), and additionally frees the multi-TT index
 * array when the entry being dropped was the last one left in it.
 */
static void drop_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	int			cnt, i;

	if (!utt || !utt->hcpriv)
		return;		/* Not below a TT, or never allocated */

	cnt = 0;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		ptt = &tt_index[udev->ttport - 1];

		/* How many entries are left in tt_index? */
		for (i = 0; i < utt->hub->maxchild; ++i)
			cnt += !!tt_index[i];
	} else {
		tt_index = NULL;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt || !list_empty(&tt->ps_list))
		return;		/* never allocated, or still in use */

	list_del(&tt->tt_list);
	*ptt = NULL;
	kfree(tt);
	/* cnt counted the entry we just freed, so 1 means it was the last;
	 * tt_index is NULL (and cnt 0) in the single-TT case
	 */
	if (cnt == 1) {
		utt->hcpriv = NULL;
		kfree(tt_index);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
/*
 * Debug-log a bandwidth reservation ("reserve", sign >= 0) or release
 * ("release", sign < 0) for the endpoint described by @ps.  @type names
 * the transfer kind (e.g. "intr") purely for the log message.
 */
static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
		struct ehci_per_sched *ps)
{
	dev_dbg(&ps->udev->dev,
			"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
			ps->ep->desc.bEndpointAddress,
			(sign >= 0 ? "reserve" : "release"), type,
			(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
			ps->phase, ps->phase_uf, ps->period,
			ps->usecs, ps->c_usecs, ps->cs_mask);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 
/*
 * Reserve (sign > 0) or release (sign < 0) the periodic bandwidth an
 * interrupt QH needs.  Release is implemented by negating the usec
 * amounts and running the same accumulation loops, so reserve/release
 * pairs cancel exactly.
 */
static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
		struct ehci_qh *qh, int sign)
{
	unsigned		start_uf;
	unsigned		i, j, m;
	int			usecs = qh->ps.usecs;
	int			c_usecs = qh->ps.c_usecs;
	int			tt_usecs = qh->ps.tt_usecs;
	struct ehci_tt		*tt;

	if (qh->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	/* first microframe of the reservation's frame phase */
	start_uf = qh->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "intr", &qh->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	/* Entire transaction (high speed) or start-split (full/low speed) */
	for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
			i += qh->ps.bw_uperiod)
		ehci->bandwidth[i] += usecs;

	/* Complete-split (full/low speed) */
	if (qh->ps.c_usecs) {
		/* NOTE: adjustments needed for FSTN */
		for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
				i += qh->ps.bw_uperiod) {
			/* the c-mask occupies bits 8..15 of cs_mask; j is
			 * the uframe offset within the frame, starting at 2
			 * (bit 10) -- NOTE(review): presumably because a
			 * complete-split can't occur in the first two
			 * uframes after the start-split; confirm against
			 * where cs_mask is built
			 */
			for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
				if (qh->ps.cs_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}
	}

	/* FS/LS bus bandwidth */
	if (tt_usecs) {
		tt = find_tt(qh->ps.udev);
		/* membership in tt->ps_list marks the TT as "in use";
		 * see drop_tt()
		 */
		if (sign > 0)
			list_add_tail(&qh->ps.ps_list, &tt->ps_list);
		else
			list_del(&qh->ps.ps_list);

		/* per-frame TT bandwidth, indexed by frame not uframe */
		for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += qh->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
/*
 * Fill @budget_table with the number of microseconds already committed
 * on @tt's downstream full/low-speed bus, per microframe, across the
 * whole scheduling window.  Each endpoint's tt_usecs is laid down
 * starting at its phase and spilled forward, 125 us per uframe, until
 * it is used up.  Does nothing if @tt is NULL (table left untouched).
 */
static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
		struct ehci_tt *tt)
{
	struct ehci_per_sched	*ps;
	unsigned		uframe, uf, x;
	u8			*budget_line;

	if (!tt)
		return;
	memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);

	/* Add up the contributions from all the endpoints using this TT */
	list_for_each_entry(ps, &tt->ps_list, ps_list) {
		for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
				uframe += ps->bw_uperiod) {
			/* budget_line[0..7] covers one frame */
			budget_line = &budget_table[uframe];
			x = ps->tt_usecs;

			/* propagate the time forward */
			for (uf = ps->phase_uf; uf < 8; ++uf) {
				x += budget_line[uf];

				/* Each microframe lasts 125 us */
				if (x <= 125) {
					budget_line[uf] = x;
					break;
				}
				/* saturate this uframe, carry the rest */
				budget_line[uf] = 125;
				x -= 125;
			}
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) static int __maybe_unused same_tt(struct usb_device *dev1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 		struct usb_device *dev2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	if (!dev1->tt || !dev2->tt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	if (dev1->tt != dev2->tt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	if (dev1->tt->multi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 		return dev1->ttport == dev2->ttport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) #ifdef CONFIG_USB_EHCI_TT_NEWSCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
/*
 * Per-uframe ceiling on schedulable TT time within a frame.  Uframes
 * 0-5 allow a full 125 us; uframe 6 allows only 30 and uframe 7 none --
 * NOTE(review): presumably so full/low-speed transactions finish before
 * end-of-frame; confirm against USB 2.0 sec 11.18.
 */
static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int uf;

	/* push any time beyond a uframe's ceiling into the next uframe;
	 * overflow can accumulate all the way into tt_usecs[7]
	 */
	for (uf = 0; uf < 7; uf++) {
		unsigned short load = tt_usecs[uf];

		if (load > max_tt_usecs[uf]) {
			tt_usecs[uf + 1] += load - max_tt_usecs[uf];
			tt_usecs[uf] = max_tt_usecs[uf];
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327)  * Return true if the device's tt's downstream bus is available for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328)  * periodic transfer of the specified length (usecs), starting at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329)  * specified frame/uframe.  Note that (as summarized in section 11.19
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330)  * of the usb 2.0 spec) TTs can buffer multiple transactions for each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331)  * uframe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333)  * The uframe parameter is when the fullspeed/lowspeed transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334)  * should be executed in "B-frame" terms, which is the same as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335)  * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336)  * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337)  * See the EHCI spec sec 4.5 and fig 4.7.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339)  * This checks if the full/lowspeed bus, at the specified starting uframe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340)  * has the specified bandwidth available, according to rules listed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341)  * in USB 2.0 spec section 11.18.1 fig 11-60.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343)  * This does not check if the transfer would exceed the max ssplit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344)  * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345)  * since proper scheduling limits ssplits to less than 16 per uframe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346)  */
static int tt_available(
	struct ehci_hcd		*ehci,
	struct ehci_per_sched	*ps,
	struct ehci_tt		*tt,
	unsigned		frame,
	unsigned		uframe
)
{
	unsigned		period = ps->bw_period;
	unsigned		usecs = ps->tt_usecs;

	if ((period == 0) || (uframe >= 7))	/* error */
		return 0;

	/* check every frame this transfer would occupy across the window;
	 * period is a power of two, so "frame &= period - 1" yields the
	 * first occurrence
	 */
	for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
			frame += period) {
		unsigned	i, uf;
		unsigned short	tt_usecs[8];

		/* quick whole-frame check -- NOTE(review): 900 appears to
		 * be the per-frame usec budget allowed on the TT's bus;
		 * confirm the source of this constant
		 */
		if (tt->bandwidth[frame] + usecs > 900)
			return 0;

		/* copy this frame's per-uframe budget (filled in by
		 * compute_tt_budget) into a scratch array we can mutate
		 */
		uf = frame << 3;
		for (i = 0; i < 8; (++i, ++uf))
			tt_usecs[i] = ehci->tt_budget[uf];

		/* the starting uframe must have some headroom left */
		if (max_tt_usecs[uframe] <= tt_usecs[uframe])
			return 0;

		/* special case for isoc transfers larger than 125us:
		 * the first and each subsequent fully used uframe
		 * must be empty, so as to not illegally delay
		 * already scheduled transactions
		 */
		if (usecs > 125) {
			int ufs = (usecs / 125);

			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
				if (tt_usecs[i] > 0)
					return 0;
		}

		/* tentatively place the transfer and spill the excess
		 * forward through the frame
		 */
		tt_usecs[uframe] += usecs;

		carryover_tt_bandwidth(tt_usecs);

		/* fail if the carryover pushed bw past the last uframe's limit */
		if (max_tt_usecs[7] < tt_usecs[7])
			return 0;
	}

	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) /* return true iff the device's transaction translator is available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404)  * for a periodic transfer starting at the specified frame, using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405)  * all the uframes in the mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) static int tt_no_collision(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	unsigned		period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	struct usb_device	*dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	unsigned		frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	u32			uf_mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	if (period == 0)	/* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	/* note bandwidth wastage:  split never follows csplit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	 * (different dev or endpoint) until the next uframe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	 * calling convention doesn't make that distinction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	for (; frame < ehci->periodic_size; frame += period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 		union ehci_shadow	here;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 		__hc32			type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 		struct ehci_qh_hw	*hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 		here = ehci->pshadow[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		while (here.ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 			switch (hc32_to_cpu(ehci, type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 			case Q_TYPE_ITD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 				here = here.itd->itd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 			case Q_TYPE_QH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 				hw = here.qh->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 				if (same_tt(dev, here.qh->ps.udev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 					u32		mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 					mask = hc32_to_cpu(ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 							hw->hw_info2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 					/* "knows" no gap is needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 					mask |= mask >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 					if (mask & uf_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 				type = Q_NEXT_TYPE(ehci, hw->hw_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 				here = here.qh->qh_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 			case Q_TYPE_SITD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 				if (same_tt(dev, here.sitd->urb->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 					u16		mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 					mask = hc32_to_cpu(ehci, here.sitd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 								->hw_uframe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 					/* FIXME assumes no gap for IN! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 					mask |= mask >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 					if (mask & uf_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 				here = here.sitd->sitd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 			/* case Q_TYPE_FSTN: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 				ehci_dbg(ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 					"periodic frame %d bogus type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 					frame, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 			/* collision or error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	/* no collision */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) #endif /* CONFIG_USB_EHCI_TT_NEWSCHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) static void enable_periodic(struct ehci_hcd *ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	if (ehci->periodic_count++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	/* Stop waiting to turn off the periodic schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	/* Don't start the schedule until PSS is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	ehci_poll_PSS(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	turn_on_io_watchdog(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) static void disable_periodic(struct ehci_hcd *ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	if (--ehci->periodic_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	/* Don't turn off the schedule until PSS is 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	ehci_poll_PSS(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) /* periodic schedule slots have iso tds (normal or split) first, then a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509)  * sparse tree for active interrupt transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511)  * this just links in a qh; caller guarantees uframe masks are set right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512)  * no FSTN support (yet; ehci 0.96+)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	unsigned	i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	unsigned	period = qh->ps.period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	dev_dbg(&qh->ps.udev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		"link qh%d-%04x/%p start %d [%d/%d us]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 			& (QH_CMASK | QH_SMASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	/* high bandwidth, or otherwise every microframe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	if (period == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		period = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		union ehci_shadow	*prev = &ehci->pshadow[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		__hc32			*hw_p = &ehci->periodic[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		union ehci_shadow	here = *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		__hc32			type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		/* skip the iso nodes at list head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		while (here.ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 			type = Q_NEXT_TYPE(ehci, *hw_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 			prev = periodic_next_shadow(ehci, prev, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 			hw_p = shadow_next_periodic(ehci, &here, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 			here = *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		/* sorting each branch by period (slow-->fast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		 * enables sharing interior tree nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		while (here.ptr && qh != here.qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 			if (qh->ps.period > here.qh->ps.period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 			prev = &here.qh->qh_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 			hw_p = &here.qh->hw->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 			here = *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		/* link in this qh, unless some earlier pass did that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		if (qh != here.qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 			qh->qh_next = here;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 			if (here.qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 				qh->hw->hw_next = *hw_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 			wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 			prev->qh = qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 			*hw_p = QH_NEXT(ehci, qh->qh_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	qh->qh_state = QH_STATE_LINKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	qh->xacterrs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	qh->unlink_reason = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	/* update per-qh bandwidth for debugfs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		: (qh->ps.usecs * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	list_add(&qh->intr_node, &ehci->intr_qh_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	/* maybe enable periodic schedule processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	++ehci->intr_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	enable_periodic(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	unsigned	i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	unsigned	period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	 * If qh is for a low/full-speed device, simply unlinking it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	 * could interfere with an ongoing split transaction.  To unlink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	 * it safely would require setting the QH_INACTIVATE bit and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	 * waiting at least one frame, as described in EHCI 4.12.2.5.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	 * We won't bother with any of this.  Instead, we assume that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	 * only reason for unlinking an interrupt QH while the current URB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	 * is still active is to dequeue all the URBs (flush the whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	 * endpoint queue).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	 * If rebalancing the periodic schedule is ever implemented, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	 * approach will no longer be valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	/* high bandwidth, or otherwise part of every microframe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	period = qh->ps.period ? : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		periodic_unlink(ehci, i, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	/* update per-qh bandwidth for debugfs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		: (qh->ps.usecs * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	dev_dbg(&qh->ps.udev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		qh->ps.period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	/* qh->qh_next still "live" to HC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	qh->qh_state = QH_STATE_UNLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	qh->qh_next.ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	if (ehci->qh_scan_next == qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		ehci->qh_scan_next = list_entry(qh->intr_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 				struct ehci_qh, intr_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	list_del(&qh->intr_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	if (qh->qh_state != QH_STATE_LINKED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 			list_empty(&qh->unlink_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	list_del_init(&qh->unlink_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	 * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	 * avoiding unnecessary CPU wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	/* If the QH isn't linked then there's nothing we can do. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	if (qh->qh_state != QH_STATE_LINKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	/* if the qh is waiting for unlink, cancel it now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	cancel_unlink_wait_intr(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	qh_unlink_periodic(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	/* Make sure the unlinks are visible before starting the timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	 * The EHCI spec doesn't say how long it takes the controller to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	 * stop accessing an unlinked interrupt QH.  The timer delay is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	 * 9 uframes; presumably that will be long enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	qh->unlink_cycle = ehci->intr_unlink_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	/* New entries go at the end of the intr_unlink list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	if (ehci->intr_unlinking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		;	/* Avoid recursive calls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	else if (ehci->rh_state < EHCI_RH_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		ehci_handle_intr_unlinks(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	else if (ehci->intr_unlink.next == &qh->unlink_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		++ehci->intr_unlink_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677)  * It is common only one intr URB is scheduled on one qh, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678)  * given complete() is run in tasklet context, introduce a bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679)  * delay to avoid unlink qh too early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) static void start_unlink_intr_wait(struct ehci_hcd *ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 				   struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	/* New entries go at the end of the intr_unlink_wait list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	if (ehci->rh_state < EHCI_RH_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		ehci_handle_start_intr_unlinks(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		++ehci->intr_unlink_wait_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	struct ehci_qh_hw	*hw = qh->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	int			rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	qh->qh_state = QH_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	hw->hw_next = EHCI_LIST_END(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	if (!list_empty(&qh->qtd_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		qh_completions(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	/* reschedule QH iff another request is queued */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		rc = qh_schedule(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			qh_refresh(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 			qh_link_periodic(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		/* An error here likely indicates handshake failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		 * or no space left in the schedule.  Neither fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		 * should happen often ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		 * FIXME kill the now-dysfunctional queued urbs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			ehci_err(ehci, "can't reschedule qh %p, err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 					qh, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	/* maybe turn off periodic schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	--ehci->intr_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	disable_periodic(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) static int check_period(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	struct ehci_hcd *ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	unsigned	frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	unsigned	uframe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	unsigned	uperiod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	unsigned	usecs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	/* complete split running into next frame?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	 * given FSTN support, we could sometimes check...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	if (uframe >= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	/* convert "usecs we need" to "max already claimed" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	usecs = ehci->uframe_periodic_max - usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			uframe += uperiod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		if (ehci->bandwidth[uframe] > usecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	/* success! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) static int check_intr_schedule(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	unsigned		frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	unsigned		uframe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	struct ehci_qh		*qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	unsigned		*c_maskp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	struct ehci_tt		*tt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	int		retval = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	u8		mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	if (qh->ps.c_usecs && uframe >= 6)	/* FSTN territory? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	if (!qh->ps.c_usecs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		*c_maskp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) #ifdef CONFIG_USB_EHCI_TT_NEWSCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		/* TODO : this may need FSTN for SSPLIT in uframe 5. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		for (i = uframe+2; i < 8 && i <= uframe+4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			if (!check_period(ehci, frame, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 					qh->ps.bw_uperiod, qh->ps.c_usecs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 				mask |= 1 << i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		*c_maskp = mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	/* Make sure this tt's buffer is also available for CSPLITs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	 * We pessimize a bit; probably the typical full speed case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 * doesn't need the second CSPLIT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	 * NOTE:  both SPLIT and CSPLIT could be checked in just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	 * one smart pass...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	mask = 0x03 << (uframe + qh->gap_uf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	*c_maskp = mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	mask |= 1 << uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 				qh->ps.bw_uperiod, qh->ps.c_usecs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		if (!check_period(ehci, frame, uframe + qh->gap_uf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 				qh->ps.bw_uperiod, qh->ps.c_usecs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) /* "first fit" scheduling policy used the first time through,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  * or when the previous schedule slot can't be re-used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	int		status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	unsigned	uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	unsigned	c_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	struct ehci_qh_hw	*hw = qh->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	struct ehci_tt		*tt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	hw->hw_next = EHCI_LIST_END(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	/* reuse the previous schedule slots, if we can */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (qh->ps.phase != NO_FRAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		ehci_dbg(ehci, "reused qh %p schedule\n", qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	uframe = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	c_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	tt = find_tt(qh->ps.udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (IS_ERR(tt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		status = PTR_ERR(tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	compute_tt_budget(ehci->tt_budget, tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	/* else scan the schedule to find a group of slots such that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	 * uframes have enough periodic bandwidth available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	/* "normal" case, uframing flexible except with splits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (qh->ps.bw_period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		int		i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		unsigned	frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		for (i = qh->ps.bw_period; i > 0; --i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 			for (uframe = 0; uframe < 8; uframe++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 				status = check_intr_schedule(ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 						frame, uframe, qh, &c_mask, tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 				if (status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 					goto got_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	/* qh->ps.bw_period == 0 means every uframe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879)  got_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	qh->ps.phase = (qh->ps.period ? ehci->random_frame &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			(qh->ps.period - 1) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	qh->ps.phase_uf = uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	qh->ps.cs_mask = qh->ps.period ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			(c_mask << 8) | (1 << uframe) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			QH_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	/* reset S-frame and (maybe) C-frame masks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	reserve_release_intr_bandwidth(ehci, qh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) static int intr_submit(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	struct urb		*urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	struct list_head	*qtd_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	gfp_t			mem_flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	unsigned		epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	struct ehci_qh		*qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	int			status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	struct list_head	empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	/* get endpoint and transfer/schedule data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	epnum = urb->ep->desc.bEndpointAddress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	spin_lock_irqsave(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		status = -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		goto done_not_linked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (unlikely(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		goto done_not_linked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	/* get qh and force any scheduling errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	INIT_LIST_HEAD(&empty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	if (qh == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	if (qh->qh_state == QH_STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		status = qh_schedule(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	/* then queue the urb's tds to the qh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	BUG_ON(qh == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	/* stuff into the periodic schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	if (qh->qh_state == QH_STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		qh_refresh(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		qh_link_periodic(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		/* cancel unlink wait for the qh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		cancel_unlink_wait_intr(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	/* ... update usbfs periodic stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	if (unlikely(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) done_not_linked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	spin_unlock_irqrestore(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		qtd_list_free(ehci, urb, qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) static void scan_intr(struct ehci_hcd *ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	struct ehci_qh		*qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			intr_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		/* clean any finished work for this qh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		if (!list_empty(&qh->qtd_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			int temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			 * Unlinks could happen here; completion reporting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			 * drops the lock.  That's why ehci->qh_scan_next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			 * always holds the next qh to scan; if the next qh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			 * gets unlinked then ehci->qh_scan_next is adjusted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			 * in qh_unlink_periodic().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			temp = qh_completions(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			if (unlikely(temp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 				start_unlink_intr(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			else if (unlikely(list_empty(&qh->qtd_list) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 					qh->qh_state == QH_STATE_LINKED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 				start_unlink_intr_wait(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) /* ehci_iso_stream ops work with both ITD and SITD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) static struct ehci_iso_stream *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) iso_stream_alloc(gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	struct ehci_iso_stream *stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	stream = kzalloc(sizeof(*stream), mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	if (likely(stream != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		INIT_LIST_HEAD(&stream->td_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		INIT_LIST_HEAD(&stream->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		stream->next_uframe = NO_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		stream->ps.phase = NO_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	return stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) iso_stream_init(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	struct ehci_iso_stream	*stream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	struct urb		*urb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	struct usb_device	*dev = urb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	u32			buf1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	unsigned		epnum, maxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	int			is_input;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	unsigned		tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	 * this might be a "high bandwidth" highspeed endpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	 * as encoded in the ep descriptor's wMaxPacket field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	epnum = usb_pipeendpoint(urb->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	maxp = usb_endpoint_maxp(&urb->ep->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	buf1 = is_input ? 1 << 11 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	/* knows about ITD vs SITD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	if (dev->speed == USB_SPEED_HIGH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		stream->highspeed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		buf1 |= maxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		maxp *= multi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		stream->buf1 = cpu_to_hc32(ehci, buf1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		stream->buf2 = cpu_to_hc32(ehci, multi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		/* usbfs wants to report the average usecs per frame tied up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		 * when transfers on this endpoint are scheduled ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		stream->ps.usecs = HS_USECS_ISO(maxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		/* period for bandwidth allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 				1 << (urb->ep->desc.bInterval - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		/* Allow urb->interval to override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		stream->uperiod = urb->interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		stream->ps.period = urb->interval >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		stream->bandwidth = stream->ps.usecs * 8 /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 				stream->ps.bw_uperiod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		u32		addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		int		think_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		int		hs_transfers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		addr = dev->ttport << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		if (!ehci_is_TDI(ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 				|| (dev->tt->hub !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 					ehci_to_hcd(ehci)->self.root_hub))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			addr |= dev->tt->hub->devnum << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		addr |= epnum << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		addr |= dev->devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		stream->ps.usecs = HS_USECS_ISO(maxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		think_time = dev->tt->think_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 				dev->speed, is_input, 1, maxp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		hs_transfers = max(1u, (maxp + 187) / 188);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		if (is_input) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			u32	tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			addr |= 1 << 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			stream->ps.c_usecs = stream->ps.usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			stream->ps.usecs = HS_USECS_ISO(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			stream->ps.cs_mask = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			/* c-mask as specified in USB 2.0 11.18.4 3.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			tmp = (1 << (hs_transfers + 2)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			stream->ps.cs_mask |= tmp << (8 + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			stream->ps.cs_mask = smask_out[hs_transfers - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		/* period for bandwidth allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 				1 << (urb->ep->desc.bInterval - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		/* Allow urb->interval to override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		stream->ps.bw_uperiod = stream->ps.bw_period << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		stream->ps.period = urb->interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		stream->uperiod = urb->interval << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 				stream->ps.bw_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		/* stream->splits gets created from cs_mask later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		stream->address = cpu_to_hc32(ehci, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	stream->ps.udev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	stream->ps.ep = urb->ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	stream->bEndpointAddress = is_input | epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	stream->maxp = maxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static struct ehci_iso_stream *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	unsigned		epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	struct ehci_iso_stream	*stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	struct usb_host_endpoint *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	epnum = usb_pipeendpoint (urb->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	if (usb_pipein(urb->pipe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		ep = urb->dev->ep_in[epnum];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		ep = urb->dev->ep_out[epnum];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	spin_lock_irqsave(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	stream = ep->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	if (unlikely(stream == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		stream = iso_stream_alloc(GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		if (likely(stream != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			ep->hcpriv = stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			iso_stream_init(ehci, stream, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	/* if dev->ep [epnum] is a QH, hw is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	} else if (unlikely(stream->hw != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			urb->dev->devpath, epnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			usb_pipein(urb->pipe) ? "in" : "out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		stream = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	spin_unlock_irqrestore(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	return stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* ehci_iso_sched ops can be ITD-only or SITD-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static struct ehci_iso_sched *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) iso_sched_alloc(unsigned packets, gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	struct ehci_iso_sched	*iso_sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	int			size = sizeof(*iso_sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	size += packets * sizeof(struct ehci_iso_packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	iso_sched = kzalloc(size, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	if (likely(iso_sched != NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		INIT_LIST_HEAD(&iso_sched->td_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	return iso_sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) itd_sched_init(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	struct ehci_iso_sched	*iso_sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	struct ehci_iso_stream	*stream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	struct urb		*urb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	unsigned	i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	dma_addr_t	dma = urb->transfer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	/* how many uframes are needed for these transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	iso_sched->span = urb->number_of_packets * stream->uperiod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	/* figure out per-uframe itd fields that we'll need later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	 * when we fit new itds into the schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	for (i = 0; i < urb->number_of_packets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		struct ehci_iso_packet	*uframe = &iso_sched->packet[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		unsigned		length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		dma_addr_t		buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		u32			trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		length = urb->iso_frame_desc[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		buf = dma + urb->iso_frame_desc[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		trans = EHCI_ISOC_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		trans |= buf & 0x0fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		if (unlikely(((i + 1) == urb->number_of_packets))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			trans |= EHCI_ITD_IOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		trans |= length << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		uframe->transaction = cpu_to_hc32(ehci, trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		/* might need to cross a buffer page within a uframe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		uframe->bufp = (buf & ~(u64)0x0fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		buf += length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 			uframe->cross = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) iso_sched_free(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	struct ehci_iso_stream	*stream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	struct ehci_iso_sched	*iso_sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	if (!iso_sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	/* caller must hold ehci->lock! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	list_splice(&iso_sched->td_list, &stream->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	kfree(iso_sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) itd_urb_transaction(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	struct ehci_iso_stream	*stream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	struct urb		*urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	gfp_t			mem_flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	struct ehci_itd		*itd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	dma_addr_t		itd_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	unsigned		num_itds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	struct ehci_iso_sched	*sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (unlikely(sched == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	itd_sched_init(ehci, sched, stream, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	if (urb->interval < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		num_itds = 1 + (sched->span + 7) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		num_itds = urb->number_of_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	/* allocate/init ITDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	spin_lock_irqsave(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	for (i = 0; i < num_itds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		 * Use iTDs from the free list, but not iTDs that may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		 * still be in use by the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		if (likely(!list_empty(&stream->free_list))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 			itd = list_first_entry(&stream->free_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 					struct ehci_itd, itd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 			if (itd->frame == ehci->now_frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 				goto alloc_itd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 			list_del(&itd->itd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 			itd_dma = itd->itd_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)  alloc_itd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 			spin_unlock_irqrestore(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 					&itd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 			spin_lock_irqsave(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			if (!itd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 				iso_sched_free(stream, sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 				spin_unlock_irqrestore(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		memset(itd, 0, sizeof(*itd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		itd->itd_dma = itd_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		itd->frame = NO_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		list_add(&itd->itd_list, &sched->td_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	spin_unlock_irqrestore(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	/* temporarily store schedule info in hcpriv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	urb->hcpriv = sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	urb->error_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		struct ehci_iso_stream *stream, int sign)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	unsigned		uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	unsigned		i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	unsigned		s_mask, c_mask, m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	int			usecs = stream->ps.usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	int			c_usecs = stream->ps.c_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	int			tt_usecs = stream->ps.tt_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	struct ehci_tt		*tt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	if (stream->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	uframe = stream->ps.bw_phase << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	bandwidth_dbg(ehci, sign, "iso", &stream->ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (sign < 0) {		/* Release bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		usecs = -usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		c_usecs = -c_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		tt_usecs = -tt_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	if (!stream->splits) {		/* High speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 				i += stream->ps.bw_uperiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			ehci->bandwidth[i] += usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	} else {			/* Full speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		s_mask = stream->ps.cs_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		c_mask = s_mask >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		/* NOTE: adjustment needed for frame overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 				i += stream->ps.bw_uperiod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 					(++j, m <<= 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				if (s_mask & m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 					ehci->bandwidth[i+j] += usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 				else if (c_mask & m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 					ehci->bandwidth[i+j] += c_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		tt = find_tt(stream->ps.udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		if (sign > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 			list_add_tail(&stream->ps.ps_list, &tt->ps_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 			list_del(&stream->ps.ps_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 				i += stream->ps.bw_period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 			tt->bandwidth[i] += tt_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) itd_slot_ok(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	struct ehci_iso_stream	*stream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	unsigned		uframe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	unsigned		usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	/* convert "usecs we need" to "max already claimed" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	usecs = ehci->uframe_periodic_max - stream->ps.usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			uframe += stream->ps.bw_uperiod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		if (ehci->bandwidth[uframe] > usecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) sitd_slot_ok(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	struct ehci_iso_stream	*stream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	unsigned		uframe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	struct ehci_iso_sched	*sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	struct ehci_tt		*tt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	unsigned		mask, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	unsigned		frame, uf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	mask = stream->ps.cs_mask << (uframe & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	/* for OUT, don't wrap SSPLIT into H-microframe 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	/* for IN, don't wrap CSPLIT into the next frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	if (mask & ~0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	/* check bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	uframe &= stream->ps.bw_uperiod - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	frame = uframe >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) #ifdef CONFIG_USB_EHCI_TT_NEWSCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	/* The tt's fullspeed bus bandwidth must be available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	 * tt_available scheduling guarantees 10+% for control/bulk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	uf = uframe & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	if (!tt_available(ehci, &stream->ps, tt, frame, uf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	/* tt must be idle for start(s), any gap, and csplit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	 * assume scheduling slop leaves 10+% for control/bulk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	if (!tt_no_collision(ehci, stream->ps.bw_period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 			stream->ps.udev, frame, mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		unsigned	max_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		unsigned	i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		/* check starts (OUT uses more than one) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		uf = uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		max_used = ehci->uframe_periodic_max - stream->ps.usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 			if (ehci->bandwidth[uf] > max_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		/* for IN, check CSPLIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		if (stream->ps.c_usecs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 			max_used = ehci->uframe_periodic_max -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 					stream->ps.c_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 			uf = uframe & ~7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 			tmp = 1 << (2+8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 			for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 				if ((stream->ps.cs_mask & tmp) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 				if (ehci->bandwidth[uf+i] > max_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 					return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		uframe += stream->ps.bw_uperiod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	} while (uframe < EHCI_BANDWIDTH_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	stream->ps.cs_mask <<= uframe & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)  * This scheduler plans almost as far into the future as it has actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)  * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)  * "as small as possible" to be cache-friendlier.)  That limits the size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)  * transfers you can stream reliably; avoid more than 64 msec per urb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)  * Also avoid queue depths of less than ehci's worst irq latency (affected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)  * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)  * and other factors); or more than about 230 msec total (for portability,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)  * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) iso_stream_schedule(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	struct urb		*urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	struct ehci_iso_stream	*stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	u32			now, base, next, start, period, span, now2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	u32			wrap = 0, skip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	int			status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	unsigned		mod = ehci->periodic_size << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	struct ehci_iso_sched	*sched = urb->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	bool			empty = list_empty(&stream->td_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	bool			new_stream = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	period = stream->uperiod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	span = sched->span;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	if (!stream->highspeed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		span <<= 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	/* Start a new isochronous stream? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	if (unlikely(empty && !hcd_periodic_completion_in_progress(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			ehci_to_hcd(ehci), urb->ep))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		/* Schedule the endpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		if (stream->ps.phase == NO_FRAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			int		done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			struct ehci_tt	*tt = find_tt(stream->ps.udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			if (IS_ERR(tt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 				status = PTR_ERR(tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 				goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			compute_tt_budget(ehci->tt_budget, tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			start = ((-(++ehci->random_frame)) << 3) & (period - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 			/* find a uframe slot with enough bandwidth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			 * Early uframes are more precious because full-speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			 * iso IN transfers can't use late uframes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 			 * and therefore they should be allocated last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 			next = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			start += period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 			do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 				start--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 				/* check schedule: enough space? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 				if (stream->highspeed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 					if (itd_slot_ok(ehci, stream, start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 						done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 					if ((start % 8) >= 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 						continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 					if (sitd_slot_ok(ehci, stream, start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 							sched, tt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 						done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			} while (start > next && !done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 			/* no room in the schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 			if (!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 				ehci_dbg(ehci, "iso sched full %p", urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 				status = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 				goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			stream->ps.phase = (start >> 3) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 					(stream->ps.period - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			stream->ps.bw_phase = stream->ps.phase &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 					(stream->ps.bw_period - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			stream->ps.phase_uf = start & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 			reserve_release_iso_bandwidth(ehci, stream, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		/* New stream is already scheduled; use the upcoming slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			start = (stream->ps.phase << 3) + stream->ps.phase_uf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		stream->next_uframe = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		new_stream = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	now = ehci_read_frame_index(ehci) & (mod - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	/* Take the isochronous scheduling threshold into account */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	if (ehci->i_thresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		next = now + ehci->i_thresh;	/* uframe cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		next = (now + 2 + 7) & ~0x07;	/* full frame cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	/* If needed, initialize last_iso_frame so that this URB will be seen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	if (ehci->isoc_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		ehci->last_iso_frame = now >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	 * Use ehci->last_iso_frame as the base.  There can't be any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	 * TDs scheduled for earlier than that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	base = ehci->last_iso_frame << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	next = (next - base) & (mod - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	start = (stream->next_uframe - base) & (mod - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	if (unlikely(new_stream))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		goto do_ASAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	 * Typical case: reuse current schedule, stream may still be active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	 * Hopefully there are no gaps from the host falling behind
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	 * (irq delays etc).  If there are, the behavior depends on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	 * whether URB_ISO_ASAP is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	now2 = (now - base) & (mod - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	/* Is the schedule about to wrap around? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	if (unlikely(!empty && start < period)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 				urb, stream->next_uframe, base, period, mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		status = -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	/* Is the next packet scheduled after the base time? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	if (likely(!empty || start <= now2 + period)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		/* URB_ISO_ASAP: make sure that start >= next */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		if (unlikely(start < next &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 				(urb->transfer_flags & URB_ISO_ASAP)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 			goto do_ASAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		/* Otherwise use start, if it's not in the past */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		if (likely(start >= now2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 			goto use_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	/* Otherwise we got an underrun while the queue was empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		if (urb->transfer_flags & URB_ISO_ASAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 			goto do_ASAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		wrap = mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		now2 += mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	/* How many uframes and packets do we need to skip? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	skip = (now2 - start + period - 1) & -period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	if (skip >= span) {		/* Entirely in the past? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 				urb, start + base, span - period, now2 + base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 				base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		/* Try to keep the last TD intact for scanning later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		skip = span - period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		/* Will it come before the current scan position? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		if (empty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 			skip = span;	/* Skip the entire URB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 			status = 1;	/* and give it back immediately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 			iso_sched_free(stream, sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 			sched = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	urb->error_count = skip / period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	if (sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		sched->first_packet = urb->error_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	goto use_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)  do_ASAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	/* Use the first slot after "next" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	start = next + ((start - next) & (period - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)  use_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	/* Tried to schedule too far into the future? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	if (unlikely(start + span - period >= mod + wrap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 				urb, start, span - period, mod + wrap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		status = -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	start += base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	stream->next_uframe = (start + skip) & (mod - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	/* report high speed start in uframes; full speed, in frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	urb->start_frame = start & (mod - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	if (!stream->highspeed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		urb->start_frame >>= 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)  fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	iso_sched_free(stream, sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	urb->hcpriv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		struct ehci_itd *itd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	/* it's been recently zeroed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	itd->hw_next = EHCI_LIST_END(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	itd->hw_bufp[0] = stream->buf0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	itd->hw_bufp[1] = stream->buf1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	itd->hw_bufp[2] = stream->buf2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	for (i = 0; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		itd->index[i] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	/* All other fields are filled when scheduling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) itd_patch(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	struct ehci_itd		*itd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	struct ehci_iso_sched	*iso_sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	unsigned		index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	u16			uframe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	unsigned		pg = itd->pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	/* BUG_ON(pg == 6 && uf->cross); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	uframe &= 0x07;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	itd->index[uframe] = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	itd->hw_transaction[uframe] = uf->transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	/* iso_frame_desc[].offset must be strictly increasing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	if (unlikely(uf->cross)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		u64	bufp = uf->bufp + 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		itd->pg = ++pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	union ehci_shadow	*prev = &ehci->pshadow[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	__hc32			*hw_p = &ehci->periodic[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	union ehci_shadow	here = *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	__hc32			type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	/* skip any iso nodes which might belong to previous microframes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	while (here.ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		type = Q_NEXT_TYPE(ehci, *hw_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		prev = periodic_next_shadow(ehci, prev, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		hw_p = shadow_next_periodic(ehci, &here, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		here = *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	itd->itd_next = here;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	itd->hw_next = *hw_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	prev->itd = itd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	itd->frame = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
/* fit urb's itds into the selected schedule slot; activate as needed */
/*
 * Walks the urb's packets, patching per-packet transfer data from the
 * transient ehci_iso_sched into iTDs (moved from iso_sched->td_list to
 * stream->td_list), and links each finished iTD into the periodic frame
 * it belongs to.  @mod is the schedule span in uframes (power of two).
 * NOTE(review): caller is expected to hold ehci->lock — confirm at call
 * sites (itd_submit takes it before calling here).
 */
static void itd_link_urb(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe, uframe, frame;
	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
	struct ehci_itd		*itd;

	/* wrap the precomputed start slot into the schedule span */
	next_uframe = stream->next_uframe & (mod - 1);

	/* first urb queued on this stream: charge its bandwidth */
	if (unlikely(list_empty(&stream->td_list)))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	/* first isoc request in flight: apply the AMD PLL quirk */
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = iso_sched->first_packet, itd = NULL;
			packet < urb->number_of_packets;) {
		if (itd == NULL) {
			/* ASSERT:  we have all necessary itds */
			/* BUG_ON(list_empty(&iso_sched->td_list)); */

			/* ASSERT:  no itds for this endpoint in this uframe */

			/* take the next preallocated iTD for this urb */
			itd = list_entry(iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail(&itd->itd_list, &stream->td_list);
			itd->stream = stream;
			itd->urb = urb;
			itd_init(ehci, stream, itd);
		}

		/* split the slot into frame number and uframe-in-frame */
		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd_patch(ehci, itd, iso_sched, packet, uframe);

		next_uframe += stream->uperiod;
		next_uframe &= mod - 1;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
			itd = NULL;
		}
	}
	/* remember where the next urb on this stream should start */
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free(stream, iso_sched);
	/* from here on, urb->hcpriv points at the stream, not the sched */
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
/* all error bits reported in an iTD's per-uframe transaction status */
#define	ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
/* Process and recycle a completed ITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
{
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	unsigned				uframe;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = itd->stream;
	bool					retval = false;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		/* index[uframe] == -1 means no packet in this uframe */
		if (likely(itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc[urb_index];

		/* read the hardware status, then clear it for reuse */
		t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
		itd->hw_transaction[uframe] = 0;

		/* report transfer status */
		if (unlikely(t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein(urb->pipe)
					? -ENOSR  /* hc couldn't read */
					: -ECOMM; /* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE)) {
				desc->actual_length = EHCI_ITD_LENGTH(t);
				urb->actual_length += desc->actual_length;
			}
		} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
			/* normal completion: record transferred length */
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH(t);
			urb->actual_length += desc->actual_length;
		} else {
			/* URB was too late */
			urb->error_count++;
		}
	}

	/* handle completion now? */
	/* not the urb's last packet yet: just recycle this iTD */
	if (likely((urb_index + 1) != urb->number_of_packets))
		goto done;

	/*
	 * ASSERT: it's really the last itd for this urb
	 * list_for_each_entry (itd, &stream->td_list, itd_list)
	 *	 BUG_ON(itd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	/* last isoc request gone: undo the AMD PLL quirk */
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	/* this itd is the stream's only remaining td: release bandwidth */
	if (unlikely(list_is_singular(&stream->td_list)))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	itd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&itd->itd_list, &stream->free_list);

	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_itd_list);
		start_free_itds(ehci);
	}

	return retval;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
/*
 * Submit a high-speed isochronous urb: find (or reuse) the endpoint's
 * iso_stream, preallocate iTDs without the lock held, then schedule and
 * link the urb under ehci->lock.
 *
 * Returns 0 on success or a negative errno; a positive value from
 * iso_stream_schedule means the urb was already too late and is
 * completed immediately (reported to the caller as success).
 */
static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (unlikely(stream == NULL)) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	/* the stream's period is fixed once set; reject mismatched urbs */
	if (unlikely(urb->interval != stream->uperiod)) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
			stream->uperiod, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction(stream, ehci, urb, mem_flags);
	if (unlikely(status < 0)) {
		ehci_dbg(ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
		/* urb missed its window entirely: complete it right away */
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
 done:
	return status;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)  * "Split ISO TDs" ... used for USB 1.1 devices going through the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)  * TTs in USB 2.0 hubs.  These need microframe scheduling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) sitd_sched_init(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	struct ehci_iso_sched	*iso_sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	struct ehci_iso_stream	*stream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	struct urb		*urb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	unsigned	i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	dma_addr_t	dma = urb->transfer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	/* how many frames are needed for these transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	iso_sched->span = urb->number_of_packets * stream->ps.period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	/* figure out per-frame sitd fields that we'll need later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	 * when we fit new sitds into the schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	for (i = 0; i < urb->number_of_packets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		struct ehci_iso_packet	*packet = &iso_sched->packet[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		unsigned		length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		dma_addr_t		buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		u32			trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		length = urb->iso_frame_desc[i].length & 0x03ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		buf = dma + urb->iso_frame_desc[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		trans = SITD_STS_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		if (((i + 1) == urb->number_of_packets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 			trans |= SITD_IOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		trans |= length << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		packet->transaction = cpu_to_hc32(ehci, trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		/* might need to cross a buffer page within a td */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		packet->bufp = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		packet->buf1 = (buf + length) & ~0x0fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		if (packet->buf1 != (buf & ~(u64)0x0fff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 			packet->cross = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		/* OUT uses multiple start-splits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		if (stream->bEndpointAddress & USB_DIR_IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		length = (length + 187) / 188;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		if (length > 1) /* BEGIN vs ALL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 			length |= 1 << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		packet->buf1 |= length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
/*
 * Allocate and pre-initialize one siTD per packet of @urb, collecting
 * them on a transient ehci_iso_sched stored in urb->hcpriv.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (in which case
 * the partial iso_sched and any siTDs already moved onto it are freed).
 */
static int
sitd_urb_transaction(
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_sitd	*sitd;
	dma_addr_t		sitd_dma;
	int			i;
	struct ehci_iso_sched	*iso_sched;
	unsigned long		flags;

	iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init(ehci, iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE:  for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/*
		 * Use siTDs from the free list, but not siTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			sitd = list_first_entry(&stream->free_list,
					 struct ehci_sitd, sitd_list);
			/* scheduled in the current frame: hardware may
			 * still read it, so fall through to a fresh
			 * allocation instead (note the goto jumps into
			 * the else arm below)
			 */
			if (sitd->frame == ehci->now_frame)
				goto alloc_sitd;
			list_del(&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else {
 alloc_sitd:
			/* drop the lock: dma_pool_alloc may sleep,
			 * depending on mem_flags
			 */
			spin_unlock_irqrestore(&ehci->lock, flags);
			sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!sitd) {
				iso_sched_free(stream, iso_sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(sitd, 0, sizeof(*sitd));
		sitd->sitd_dma = sitd_dma;
		sitd->frame = NO_FRAME;
		list_add(&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore(&ehci->lock, flags);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) sitd_patch(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	struct ehci_hcd		*ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	struct ehci_iso_stream	*stream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	struct ehci_sitd	*sitd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	struct ehci_iso_sched	*iso_sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	unsigned		index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	u64			bufp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	sitd->hw_next = EHCI_LIST_END(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	sitd->hw_fullspeed_ep = stream->address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	sitd->hw_uframe = stream->splits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	sitd->hw_results = uf->transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	sitd->hw_backpointer = EHCI_LIST_END(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	bufp = uf->bufp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	if (uf->cross)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		bufp += 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	sitd->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 
/* link one sitd into the head of the given periodic frame's list */
static inline void
sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow[frame];
	sitd->hw_next = ehci->periodic[frame];
	ehci->pshadow[frame].sitd = sitd;
	sitd->frame = frame;
	/* the sitd must be fully written out before the HC can see it */
	wmb();
	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
/* fit urb's sitds into the selected schedule slot; activate as needed */
/*
 * One siTD per packet: patch each from the transient schedule data and
 * link it into the periodic frame it belongs to.  @mod is the schedule
 * span in uframes (power of two).  NOTE(review): caller is expected to
 * hold ehci->lock — confirm at call sites.
 */
static void sitd_link_urb(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	struct ehci_sitd	*sitd;

	next_uframe = stream->next_uframe;

	/* first urb queued on this stream: charge its bandwidth */
	if (list_empty(&stream->td_list))
		/* usbfs ignores TT bandwidth */
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	/* first isoc request in flight: apply the AMD PLL quirk */
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = sched->first_packet, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT:  we have all necessary sitds */
		BUG_ON(list_empty(&sched->td_list));

		/* ASSERT:  no itds for this endpoint in this frame */

		/* take the next preallocated siTD for this urb */
		sitd = list_entry(sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail(&sitd->sitd_list, &stream->td_list);
		sitd->stream = stream;
		sitd->urb = urb;

		sitd_patch(ehci, stream, sitd, sched, packet);
		/* next_uframe >> 3 converts uframes to a frame number */
		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
				sitd);

		next_uframe += stream->uperiod;
	}
	/* remember (wrapped) where the next urb should start */
	stream->next_uframe = next_uframe & (mod - 1);

	/* don't need that schedule data any more */
	iso_sched_free(stream, sched);
	/* from here on, urb->hcpriv points at the stream, not the sched */
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) #define	SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 				| SITD_STS_XACT | SITD_STS_MMF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) /* Process and recycle a completed SITD.  Return true iff its urb completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)  * and hence its completion callback probably added things to the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)  * schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)  * Note that we carefully avoid recycling this descriptor until after any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)  * completion callback runs, so that it won't be reused quickly.  That is,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)  * assuming (a) no more than two urbs per frame on this endpoint, and also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)  * (b) only this endpoint's completions submit URBs.  It seems some silicon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)  * corrupts things if you reuse completed descriptors very quickly...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	struct urb				*urb = sitd->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	struct usb_iso_packet_descriptor	*desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	u32					t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	int					urb_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	struct ehci_iso_stream			*stream = sitd->stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	bool					retval = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	urb_index = sitd->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	desc = &urb->iso_frame_desc[urb_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	t = hc32_to_cpup(ehci, &sitd->hw_results);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	/* report transfer status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	if (unlikely(t & SITD_ERRS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		urb->error_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		if (t & SITD_STS_DBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			desc->status = usb_pipein(urb->pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 				? -ENOSR  /* hc couldn't read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 				: -ECOMM; /* hc couldn't write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		else if (t & SITD_STS_BABBLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 			desc->status = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		else /* XACT, MMF, etc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 			desc->status = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	} else if (unlikely(t & SITD_STS_ACTIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		/* URB was too late */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		urb->error_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		desc->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		desc->actual_length = desc->length - SITD_LENGTH(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		urb->actual_length += desc->actual_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	/* handle completion now? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	if ((urb_index + 1) != urb->number_of_packets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	 * ASSERT: it's really the last sitd for this urb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	 * list_for_each_entry (sitd, &stream->td_list, sitd_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	 *	 BUG_ON(sitd->urb == urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	/* give urb back to the driver; completion often (re)submits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	ehci_urb_done(ehci, urb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	retval = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	urb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	--ehci->isoc_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	disable_periodic(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 		if (ehci->amd_pll_fix == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 			usb_amd_quirk_pll_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	if (list_is_singular(&stream->td_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		ehci_to_hcd(ehci)->self.bandwidth_allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 				-= stream->bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	sitd->urb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	/* Add to the end of the free list for later reuse */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	list_move_tail(&sitd->sitd_list, &stream->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	if (list_empty(&stream->td_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		list_splice_tail_init(&stream->free_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 				&ehci->cached_sitd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		start_free_itds(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	int			status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	struct ehci_iso_stream	*stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	/* Get iso_stream head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	stream = iso_stream_find(ehci, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	if (stream == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		ehci_dbg(ehci, "can't get iso stream\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	if (urb->interval != stream->ps.period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 			stream->ps.period, urb->interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) #ifdef EHCI_URB_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	ehci_dbg(ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		"submit %p dev%s ep%d%s-iso len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		urb, urb->dev->devpath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		usb_pipeendpoint(urb->pipe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		usb_pipein(urb->pipe) ? "in" : "out",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		urb->transfer_buffer_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	/* allocate SITDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 		ehci_dbg(ehci, "can't init sitds\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	/* schedule ... need to lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	spin_lock_irqsave(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 		status = -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		goto done_not_linked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	if (unlikely(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		goto done_not_linked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	status = iso_stream_schedule(ehci, urb, stream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	if (likely(status == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	} else if (status > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		ehci_urb_done(ehci, urb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)  done_not_linked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	spin_unlock_irqrestore(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)  done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) static void scan_isoc(struct ehci_hcd *ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	unsigned		uf, now_frame, frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	unsigned		fmask = ehci->periodic_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	bool			modified, live;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	union ehci_shadow	q, *q_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	__hc32			type, *hw_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	 * When running, scan from last scan point up to "now"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	 * else clean up by scanning everything that's left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	 * Touches as few pages as possible:  cache-friendly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	if (ehci->rh_state >= EHCI_RH_RUNNING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		uf = ehci_read_frame_index(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		now_frame = (uf >> 3) & fmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		live = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	} else  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		now_frame = (ehci->last_iso_frame - 1) & fmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		live = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	ehci->now_frame = now_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	frame = ehci->last_iso_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	/* Scan each element in frame's queue for completions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	q_p = &ehci->pshadow[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	hw_p = &ehci->periodic[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	q.ptr = q_p->ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	type = Q_NEXT_TYPE(ehci, *hw_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	modified = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	while (q.ptr != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		switch (hc32_to_cpu(ehci, type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		case Q_TYPE_ITD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 			 * If this ITD is still active, leave it for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 			 * later processing ... check the next entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 			 * No need to check for activity unless the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 			 * frame is current.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 			if (frame == now_frame && live) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 				rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 				for (uf = 0; uf < 8; uf++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 					if (q.itd->hw_transaction[uf] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 							ITD_ACTIVE(ehci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 				if (uf < 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 					q_p = &q.itd->itd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 					hw_p = &q.itd->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 					type = Q_NEXT_TYPE(ehci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 							q.itd->hw_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 					q = *q_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 			 * Take finished ITDs out of the schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 			 * and process them:  recycle, maybe report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 			 * URB completion.  HC won't cache the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 			 * pointer for much longer, if at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 			*q_p = q.itd->itd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 			if (!ehci->use_dummy_qh ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 					q.itd->hw_next != EHCI_LIST_END(ehci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 				*hw_p = q.itd->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 			type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 			wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 			modified = itd_complete(ehci, q.itd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 			q = *q_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		case Q_TYPE_SITD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 			 * If this SITD is still active, leave it for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 			 * later processing ... check the next entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 			 * No need to check for activity unless the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 			 * frame is current.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 			if (((frame == now_frame) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 					(((frame + 1) & fmask) == now_frame))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 				&& live
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 				&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 				q_p = &q.sitd->sitd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 				hw_p = &q.sitd->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 				q = *q_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 			 * Take finished SITDs out of the schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 			 * and process them:  recycle, maybe report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 			 * URB completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 			*q_p = q.sitd->sitd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 			if (!ehci->use_dummy_qh ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 					q.sitd->hw_next != EHCI_LIST_END(ehci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 				*hw_p = q.sitd->hw_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 			type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 			wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 			modified = sitd_complete(ehci, q.sitd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			q = *q_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 			ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 					type, frame, q.ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 			/* BUG(); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		case Q_TYPE_QH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		case Q_TYPE_FSTN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 			/* End of the iTDs and siTDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 			q.ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		/* Assume completion callbacks modify the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 		if (unlikely(modified && ehci->isoc_count > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 			goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	/* Stop when we have reached the current frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	if (frame == now_frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	/* The last frame may still have active siTDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	ehci->last_iso_frame = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	frame = (frame + 1) & fmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }