// SPDX-License-Identifier: GPL-2.0
/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem.  The worst that can happen is that we set the IOC bit again,
 * generating a spurious interrupt.  We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases.  I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
        if (uhci->is_stopped)
                mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
        uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
        uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
}


/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
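/*
 * Roughly, with FSBR on the tail of the async schedule loops back into
 * the full-speed queues instead of terminating (a sketch; the exact
 * layout depends on the skeleton QHs):
 *
 *      ... -> last async QH -> skel_term_qh -> first FSBR QH -> ...
 *
 * so the controller keeps servicing full-speed transfers for the rest
 * of each frame rather than idling.
 */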
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
        struct uhci_qh *lqh;

        /* The terminating skeleton QH always points back to the first
         * FSBR QH.  Make the last async QH point to the terminating
         * skeleton QH. */
        uhci->fsbr_is_on = 1;
        lqh = list_entry(uhci->skel_async_qh->node.prev,
                        struct uhci_qh, node);
        lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
        struct uhci_qh *lqh;

        /* Remove the link from the last async QH to the terminating
         * skeleton QH. */
        uhci->fsbr_is_on = 0;
        lqh = list_entry(uhci->skel_async_qh->node.prev,
                        struct uhci_qh, node);
        lqh->link = UHCI_PTR_TERM(uhci);
}

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;

        urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
        if (urbp->fsbr) {
                uhci->fsbr_is_wanted = 1;
                if (!uhci->fsbr_is_on)
                        uhci_fsbr_on(uhci);
                else if (uhci->fsbr_expiring) {
                        uhci->fsbr_expiring = 0;
                        del_timer(&uhci->fsbr_timer);
                }
        }
}

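/*
 * Timer callback for FSBR expiration.  The fsbr_expiring flag is
 * re-tested under uhci->lock because uhci_urbp_wants_fsbr() may have
 * cleared it (and cancelled the timer) just as the timer was firing.
 */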
static void uhci_fsbr_timeout(struct timer_list *t)
{
        struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
        unsigned long flags;

        spin_lock_irqsave(&uhci->lock, flags);
        if (uhci->fsbr_expiring) {
                uhci->fsbr_expiring = 0;
                uhci_fsbr_off(uhci);
        }
        spin_unlock_irqrestore(&uhci->lock, flags);
}


static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
        dma_addr_t dma_handle;
        struct uhci_td *td;

        td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
        if (!td)
                return NULL;

        td->dma_handle = dma_handle;
        td->frame = -1;

        INIT_LIST_HEAD(&td->list);
        INIT_LIST_HEAD(&td->fl_list);

        return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
        if (!list_empty(&td->list))
                dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
        if (!list_empty(&td->fl_list))
                dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

        dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
                u32 status, u32 token, u32 buffer)
{
        td->status = cpu_to_hc32(uhci, status);
        td->token = cpu_to_hc32(uhci, token);
        td->buffer = cpu_to_hc32(uhci, buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
        list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
        list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
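/*
 * (A new TD is appended at the tail of the frame's existing TD chain.
 * Note the ordering below: the TD's own link pointer is written first,
 * then wmb(), and only then is the previous link updated, so the
 * controller can never follow a pointer to a half-initialized TD.)
 */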
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td, unsigned framenum)
{
        framenum &= (UHCI_NUMFRAMES - 1);

        td->frame = framenum;

        /* Is there a TD already mapped there? */
        if (uhci->frame_cpu[framenum]) {
                struct uhci_td *ftd, *ltd;

                ftd = uhci->frame_cpu[framenum];
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

                list_add_tail(&td->fl_list, &ftd->fl_list);

                td->link = ltd->link;
                wmb();
                ltd->link = LINK_TO_TD(uhci, td);
        } else {
                td->link = uhci->frame[framenum];
                wmb();
                uhci->frame[framenum] = LINK_TO_TD(uhci, td);
                uhci->frame_cpu[framenum] = td;
        }
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td)
{
        /* If it's not inserted, don't remove it */
        if (td->frame == -1) {
                WARN_ON(!list_empty(&td->fl_list));
                return;
        }

        if (uhci->frame_cpu[td->frame] == td) {
                if (list_empty(&td->fl_list)) {
                        uhci->frame[td->frame] = td->link;
                        uhci->frame_cpu[td->frame] = NULL;
                } else {
                        struct uhci_td *ntd;

                        ntd = list_entry(td->fl_list.next,
                                         struct uhci_td,
                                         fl_list);
                        uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd);
                        uhci->frame_cpu[td->frame] = ntd;
                }
        } else {
                struct uhci_td *ptd;

                ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
                ptd->link = td->link;
        }

        list_del_init(&td->fl_list);
        td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
                unsigned int framenum)
{
        struct uhci_td *ftd, *ltd;

        framenum &= (UHCI_NUMFRAMES - 1);

        ftd = uhci->frame_cpu[framenum];
        if (ftd) {
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
                uhci->frame[framenum] = ltd->link;
                uhci->frame_cpu[framenum] = NULL;

                while (!list_empty(&ftd->fl_list))
                        list_del_init(ftd->fl_list.prev);
        }
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
        struct uhci_td *td;

        list_for_each_entry(td, &urbp->td_list, list)
                uhci_remove_td_from_frame_list(uhci, td);
}

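/*
 * Allocate and initialize a QH.  A NULL udev marks one of the driver's
 * skeleton QHs, which have no endpoint, device, or dummy TD attached.
 */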
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
                struct usb_device *udev, struct usb_host_endpoint *hep)
{
        dma_addr_t dma_handle;
        struct uhci_qh *qh;

        qh = dma_pool_zalloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
        if (!qh)
                return NULL;

        qh->dma_handle = dma_handle;

        qh->element = UHCI_PTR_TERM(uhci);
        qh->link = UHCI_PTR_TERM(uhci);

        INIT_LIST_HEAD(&qh->queue);
        INIT_LIST_HEAD(&qh->node);

        if (udev) {             /* Normal QH */
                qh->type = usb_endpoint_type(&hep->desc);
                if (qh->type != USB_ENDPOINT_XFER_ISOC) {
                        qh->dummy_td = uhci_alloc_td(uhci);
                        if (!qh->dummy_td) {
                                dma_pool_free(uhci->qh_pool, qh, dma_handle);
                                return NULL;
                        }
                }
                qh->state = QH_STATE_IDLE;
                qh->hep = hep;
                qh->udev = udev;
                hep->hcpriv = qh;

                if (qh->type == USB_ENDPOINT_XFER_INT ||
                                qh->type == USB_ENDPOINT_XFER_ISOC)
                        /* usb_calc_bus_time() returns ns; store the
                         * worst-case load in us, rounded up */
                        qh->load = usb_calc_bus_time(udev->speed,
                                        usb_endpoint_dir_in(&hep->desc),
                                        qh->type == USB_ENDPOINT_XFER_ISOC,
                                        usb_endpoint_maxp(&hep->desc))
                                / 1000 + 1;

        } else {                /* Skeleton QH */
                qh->state = QH_STATE_ACTIVE;
                qh->type = -1;
        }
        return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
        if (!list_empty(&qh->queue))
                dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);

        list_del(&qh->node);
        if (qh->udev) {
                qh->hep->hcpriv = NULL;
                if (qh->dummy_td)
                        uhci_free_td(uhci, qh->dummy_td);
        }
        dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
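/*
 * (For the isochronous test below: the URB is held back only while the
 * hardware could still be fetching TDs from the frame in which the
 * queue was unlinked; the uhci->is_stopped term compensates for the
 * frame counter not advancing while the controller is stopped.)
 */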
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
                struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_td *td;
        int ret = 1;

        /* Isochronous pipes don't use toggles and their TD link pointers
         * get adjusted during uhci_urb_dequeue().  But since their queues
         * cannot truly be stopped, we have to watch out for dequeues
         * occurring after the nominal unlink frame. */
        if (qh->type == USB_ENDPOINT_XFER_ISOC) {
                ret = (uhci->frame_number + uhci->is_stopped !=
                                qh->unlink_frame);
                goto done;
        }

        /* If the URB isn't first on its queue, adjust the link pointer
         * of the last TD in the previous URB.  The toggle doesn't need
         * to be saved since this URB can't be executing yet. */
        if (qh->queue.next != &urbp->node) {
                struct urb_priv *purbp;
                struct uhci_td *ptd;

                purbp = list_entry(urbp->node.prev, struct urb_priv, node);
                WARN_ON(list_empty(&purbp->td_list));
                ptd = list_entry(purbp->td_list.prev, struct uhci_td,
                                list);
                td = list_entry(urbp->td_list.prev, struct uhci_td,
                                list);
                ptd->link = td->link;
                goto done;
        }

        /* If the QH element pointer is UHCI_PTR_TERM then the currently
         * executing URB has already been unlinked, so this one isn't it. */
        if (qh_element(qh) == UHCI_PTR_TERM(uhci))
                goto done;
        qh->element = UHCI_PTR_TERM(uhci);

        /* Control pipes don't have to worry about toggles */
        if (qh->type == USB_ENDPOINT_XFER_CONTROL)
                goto done;

        /* Save the next toggle value */
        WARN_ON(list_empty(&urbp->td_list));
        td = list_entry(urbp->td_list.next, struct uhci_td, list);
        qh->needs_fixup = 1;
        qh->initial_toggle = uhci_toggle(td_token(uhci, td));

done:
        return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
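/*
 * (A toggle value of 2 serves as an out-of-range sentinel meaning "the
 * next URB's toggles are already correct"; the "toggle > 1" test below
 * then skips the fixup and re-seeds the toggle from that URB's last TD.)
 */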
static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh,
                int skip_first)
{
        struct urb_priv *urbp = NULL;
        struct uhci_td *td;
        unsigned int toggle = qh->initial_toggle;
        unsigned int pipe;

        /* Fixups for a short transfer start with the second URB in the
         * queue (the short URB is the first). */
        if (skip_first)
                urbp = list_entry(qh->queue.next, struct urb_priv, node);

        /* When starting with the first URB, if the QH element pointer is
         * still valid then we know the URB's toggles are okay. */
        else if (qh_element(qh) != UHCI_PTR_TERM(uhci))
                toggle = 2;

        /* Fix up the toggle for the URBs in the queue.  Normally this
         * loop won't run more than once: When an error or short transfer
         * occurs, the queue usually gets emptied. */
        urbp = list_prepare_entry(urbp, &qh->queue, node);
        list_for_each_entry_continue(urbp, &qh->queue, node) {

                /* If the first TD has the right toggle value, we don't
                 * need to change any toggles in this URB */
                td = list_entry(urbp->td_list.next, struct uhci_td, list);
                if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) {
                        td = list_entry(urbp->td_list.prev, struct uhci_td,
                                        list);
                        toggle = uhci_toggle(td_token(uhci, td)) ^ 1;

                /* Otherwise all the toggles in the URB have to be switched */
                } else {
                        list_for_each_entry(td, &urbp->td_list, list) {
                                td->token ^= cpu_to_hc32(uhci,
                                                        TD_TOKEN_TOGGLE);
                                toggle ^= 1;
                        }
                }
        }

        wmb();
        pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
        usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
                        usb_pipeout(pipe), toggle);
        qh->needs_fixup = 0;
}

/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

        /* Isochronous QHs aren't linked by the hardware */
}

/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;

        list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        qh->link = pqh->link;
        wmb();
        pqh->link = LINK_TO_QH(uhci, qh);
}

/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
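/*
 * (QHs on the async list are kept sorted by their skel value: the
 * reverse search below finds the last QH with skel <= the new one's
 * and inserts after it, which keeps all the FSBR-eligible QHs grouped
 * together at the tail of the list.)
 */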
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;
        __hc32 link_to_new_qh;

        /* Find the predecessor QH for our new one and insert it in the list.
         * The list of QHs is expected to be short, so linear search won't
         * take too long. */
        list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
                if (pqh->skel <= qh->skel)
                        break;
        }
        list_add(&qh->node, &pqh->node);

        /* Link it into the schedule */
        qh->link = pqh->link;
        wmb();
        link_to_new_qh = LINK_TO_QH(uhci, qh);
        pqh->link = link_to_new_qh;

        /* If this is now the first FSBR QH, link the terminating skeleton
         * QH to it. */
        if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
                uhci->skel_term_qh->link = link_to_new_qh;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(list_empty(&qh->queue));

        /* Set the element pointer if it isn't set already.
         * This isn't needed for Isochronous queues, but it doesn't hurt. */
        if (qh_element(qh) == UHCI_PTR_TERM(uhci)) {
                struct urb_priv *urbp = list_entry(qh->queue.next,
                                struct urb_priv, node);
                struct uhci_td *td = list_entry(urbp->td_list.next,
                                struct uhci_td, list);

                qh->element = LINK_TO_TD(uhci, td);
        }

        /* Treat the queue as if it has just advanced */
        qh->wait_expired = 0;
        qh->advance_jiffies = jiffies;

        if (qh->state == QH_STATE_ACTIVE)
                return;
        qh->state = QH_STATE_ACTIVE;

        /* Move the QH from its old list to the correct spot in the appropriate
         * skeleton's list */
        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_del(&qh->node);

        if (qh->skel == SKEL_ISO)
                link_iso(uhci, qh);
        else if (qh->skel < SKEL_ASYNC)
                link_interrupt(uhci, qh);
        else
                link_async(uhci, qh);
}

/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;

        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        pqh->link = qh->link;
        mb();
}

/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;
        __hc32 link_to_next_qh = qh->link;

        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        pqh->link = link_to_next_qh;

        /* If this was the old first FSBR QH, link the terminating skeleton
         * QH to the next (new first FSBR) QH. */
        if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
                uhci->skel_term_qh->link = link_to_next_qh;
        mb();
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        if (qh->state == QH_STATE_UNLINKING)
                return;
        WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
        qh->state = QH_STATE_UNLINKING;

        /* Unlink the QH from the schedule and record when we did it */
        if (qh->skel == SKEL_ISO)
                ;
        else if (qh->skel < SKEL_ASYNC)
                unlink_interrupt(uhci, qh);
        else
                unlink_async(uhci, qh);

        uhci_get_current_frame_number(uhci);
        qh->unlink_frame = uhci->frame_number;

        /* Force an interrupt so we know when the QH is fully unlinked */
        if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
                uhci_set_next_interrupt(uhci);

        /* Move the QH from its old list to the end of the unlinking list */
        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state == QH_STATE_ACTIVE);

        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_move(&qh->node, &uhci->idle_qh_list);
        qh->state = QH_STATE_IDLE;

        /* Now that the QH is idle, its post_td isn't being used */
        if (qh->post_td) {
                uhci_free_td(uhci, qh->post_td);
                qh->post_td = NULL;
        }

        /* If anyone is waiting for a QH to become idle, wake them up */
        if (uhci->num_waiting)
                wake_up_all(&uhci->waitqh);
}

/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
        int highest_load = uhci->load[phase];

        for (phase += period; phase < MAX_PHASE; phase += period)
                highest_load = max_t(int, highest_load, uhci->load[phase]);
        return highest_load;
}

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
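/*
 * (For example, with qh->period == 8 the candidate phases are 0-7; for
 * each one the worst-case load is taken over frames phase, phase + 8,
 * phase + 16, ... up to MAX_PHASE, and the phase whose maximum is
 * smallest wins, hence the "minimax" name below.)
 */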
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) int minimax_load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) /* Find the optimal phase (unless it is already set) and get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * its load value. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (qh->phase >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) int phase, load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) int max_phase = min_t(int, MAX_PHASE, qh->period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) qh->phase = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) for (phase = 1; phase < max_phase; ++phase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) load = uhci_highest_load(uhci, phase, qh->period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) if (load < minimax_load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) minimax_load = load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) qh->phase = phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) /* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) if (minimax_load + qh->load > 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) "period %d, phase %d, %d + %d us\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) qh->period, qh->phase, minimax_load, qh->load);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * Reserve a periodic QH's bandwidth in the schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) int load = qh->load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) char *p = "??";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) uhci->load[i] += load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) uhci->total_load += load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) uhci_to_hcd(uhci)->self.bandwidth_allocated =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) uhci->total_load / MAX_PHASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) switch (qh->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) case USB_ENDPOINT_XFER_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) p = "INT";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) case USB_ENDPOINT_XFER_ISOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) p = "ISO";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) qh->bandwidth_reserved = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) dev_dbg(uhci_dev(uhci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) "reserve", qh->udev->devnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) qh->hep->desc.bEndpointAddress, p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) qh->period, qh->phase, load);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) * Release a periodic QH's bandwidth reservation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) int load = qh->load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) char *p = "??";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) uhci->load[i] -= load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) uhci->total_load -= load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) uhci_to_hcd(uhci)->self.bandwidth_allocated =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) uhci->total_load / MAX_PHASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) switch (qh->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) case USB_ENDPOINT_XFER_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) --uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) p = "INT";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) case USB_ENDPOINT_XFER_ISOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) --uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) p = "ISO";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) qh->bandwidth_reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) dev_dbg(uhci_dev(uhci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) "release", qh->udev->devnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) qh->hep->desc.bEndpointAddress, p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) qh->period, qh->phase, load);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) struct urb_priv *urbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (!urbp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) urbp->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) urb->hcpriv = urbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) INIT_LIST_HEAD(&urbp->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) INIT_LIST_HEAD(&urbp->td_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) return urbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) static void uhci_free_urb_priv(struct uhci_hcd *uhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) struct urb_priv *urbp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct uhci_td *td, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (!list_empty(&urbp->node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) urbp->urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) uhci_remove_td_from_urbp(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) uhci_free_td(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) kmem_cache_free(uhci_up_cachep, urbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * Map status to standard result codes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * <status> is (td_status(uhci, td) & 0xF60000), a.k.a.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * uhci_status_bits(td_status(uhci, td)).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * Note: <status> does not include the TD_CTRL_NAK bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * <dir_out> is True for output TDs and False for input TDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) static int uhci_map_status(int status, int dir_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (dir_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) return -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (status & TD_CTRL_BABBLE) /* Babble */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (status & TD_CTRL_DBUFERR) /* Buffer error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return -ENOSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (status & TD_CTRL_STALLED) /* Stalled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * Control transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) struct uhci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct uhci_td *td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) unsigned long destination, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) int maxsze = usb_endpoint_maxp(&qh->hep->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) int len = urb->transfer_buffer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) dma_addr_t data = urb->transfer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) __hc32 *plink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct urb_priv *urbp = urb->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) int skel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
	/* The "pipe" word encodes the destination (device address and endpoint) in bits 8--18 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
	/* Allow up to 3 errors; ACTIVE isn't set yet, so the dummy TD remains inactive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) status = uhci_maxerr(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (urb->dev->speed == USB_SPEED_LOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) status |= TD_CTRL_LS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * Build the TD for the control request setup packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) td = qh->dummy_td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) uhci_add_td_to_urbp(td, urbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) uhci_fill_td(uhci, td, status, destination | uhci_explen(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) urb->setup_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) plink = &td->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) status |= TD_CTRL_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * If direction is "send", change the packet ID from SETUP (0x2D)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * to OUT (0xE1). Else change it from SETUP to IN (0x69) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * set Short Packet Detect (SPD) for all data packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * 0-length transfers always get treated as "send".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (usb_pipeout(urb->pipe) || len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) destination ^= (USB_PID_SETUP ^ USB_PID_IN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) status |= TD_CTRL_SPD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
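
	/*
	 * Worked example of the XOR trick above, using the PID values
	 * from the comment: for an OUT transfer,
	 * 0x2D ^ (0x2D ^ 0xE1) == 0xE1; for IN,
	 * 0x2D ^ (0x2D ^ 0x69) == 0x69.  Only the PID bits of
	 * <destination> change; the device address and endpoint bits
	 * are untouched.
	 */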
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * Build the DATA TDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) while (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) int pktsze = maxsze;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (len <= pktsze) { /* The last data packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) pktsze = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) status &= ~TD_CTRL_SPD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) td = uhci_alloc_td(uhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (!td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) *plink = LINK_TO_TD(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) /* Alternate Data0/1 (start with Data1) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) destination ^= TD_TOKEN_TOGGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) uhci_add_td_to_urbp(td, urbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) uhci_fill_td(uhci, td, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) destination | uhci_explen(pktsze), data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) plink = &td->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) data += pktsze;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) len -= pktsze;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
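
	/*
	 * At this point a hypothetical 12-byte control read on an
	 * endpoint with maxsze == 8 would have produced the chain
	 *
	 *	SETUP(8, DATA0) -> IN(8, DATA1) -> IN(4, DATA0)
	 *
	 * with the status-stage TD still to be appended below.
	 */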
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * Build the final TD for control status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) td = uhci_alloc_td(uhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (!td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *plink = LINK_TO_TD(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /* Change direction for the status transaction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) destination ^= (USB_PID_IN ^ USB_PID_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) uhci_add_td_to_urbp(td, urbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) uhci_fill_td(uhci, td, status | TD_CTRL_IOC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) destination | uhci_explen(0), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) plink = &td->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * Build the new dummy TD and activate the old one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) td = uhci_alloc_td(uhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (!td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) *plink = LINK_TO_TD(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) qh->dummy_td = td;
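
	/*
	 * Design note: the queue always ends with an inactive dummy TD,
	 * so qh->element never points at freed memory.  The new TDs were
	 * linked in behind the old dummy while the controller ignored
	 * them; the wmb() ensures their contents are globally visible
	 * before the old dummy is flipped to ACTIVE, exposing the whole
	 * chain to the hardware at once.
	 */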
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /* Low-speed transfers get a different queue, and won't hog the bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * Also, some devices enumerate better without FSBR; the easiest way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * to do that is to put URBs on the low-speed queue while the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * isn't in the CONFIGURED state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (urb->dev->speed == USB_SPEED_LOW ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) urb->dev->state != USB_STATE_CONFIGURED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) skel = SKEL_LS_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) skel = SKEL_FS_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) uhci_add_fsbr(uhci, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (qh->state != QH_STATE_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) qh->skel = skel;
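	/* A QH that is already linked into the schedule keeps its old
	 * skeleton position; changing qh->skel while the hardware may
	 * still be walking that chain would corrupt the schedule. */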
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /* Remove the dummy TD from the td_list so it doesn't get freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) uhci_remove_td_from_urbp(qh->dummy_td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * Common submit for bulk and interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct uhci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct uhci_td *td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) unsigned long destination, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int maxsze = usb_endpoint_maxp(&qh->hep->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) int len = urb->transfer_buffer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) int this_sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) dma_addr_t data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) __hc32 *plink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct urb_priv *urbp = urb->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) unsigned int toggle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
	/* The "pipe" word encodes the destination (device address and endpoint) in bits 8--18 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) usb_pipeout(urb->pipe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
	/* Allow up to 3 errors; ACTIVE isn't set yet, so the dummy TD remains inactive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) status = uhci_maxerr(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (urb->dev->speed == USB_SPEED_LOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) status |= TD_CTRL_LS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (usb_pipein(urb->pipe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) status |= TD_CTRL_SPD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) i = urb->num_mapped_sgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (len > 0 && i > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) sg = urb->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) data = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /* urb->transfer_buffer_length may be smaller than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * size of the scatterlist (or vice versa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) this_sg_len = min_t(int, sg_dma_len(sg), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) data = urb->transfer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) this_sg_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * Build the DATA TDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) plink = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) td = qh->dummy_td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) for (;;) { /* Allow zero length packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int pktsze = maxsze;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (len <= pktsze) { /* The last packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) pktsze = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) status &= ~TD_CTRL_SPD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (plink) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) td = uhci_alloc_td(uhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (!td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) *plink = LINK_TO_TD(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) uhci_add_td_to_urbp(td, urbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) uhci_fill_td(uhci, td, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) destination | uhci_explen(pktsze) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) (toggle << TD_TOKEN_TOGGLE_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) plink = &td->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) status |= TD_CTRL_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) toggle ^= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) data += pktsze;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) this_sg_len -= pktsze;
		len -= maxsze;	/* deliberately maxsze, not pktsze: a
				 * short final packet still drives len
				 * to <= 0, ending the loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (this_sg_len <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (--i <= 0 || len <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) data = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) this_sg_len = min_t(int, sg_dma_len(sg), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if the direction
	 * is OUT and transfer_buffer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0.
	 * However, if transfer_buffer_length == 0, the zero packet was
	 * already prepared above.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if ((urb->transfer_flags & URB_ZERO_PACKET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) usb_pipeout(urb->pipe) && len == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) urb->transfer_buffer_length > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) td = uhci_alloc_td(uhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (!td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) *plink = LINK_TO_TD(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) uhci_add_td_to_urbp(td, urbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) uhci_fill_td(uhci, td, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) destination | uhci_explen(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) (toggle << TD_TOKEN_TOGGLE_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) plink = &td->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) toggle ^= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * takes about 3 ms to transfer (full speed carries roughly
	 * 1 MB/s after protocol overhead); that's a little on the fast
	 * side but not enough to justify delaying an interrupt by more
	 * than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * Build the new dummy TD and activate the old one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) td = uhci_alloc_td(uhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (!td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) *plink = LINK_TO_TD(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) qh->dummy_td = td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) usb_pipeout(urb->pipe), toggle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* Remove the dummy TD from the td_list so it doesn't get freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) uhci_remove_td_from_urbp(qh->dummy_td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct uhci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /* Can't have low-speed bulk transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (urb->dev->speed == USB_SPEED_LOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (qh->state != QH_STATE_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) qh->skel = SKEL_BULK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ret = uhci_submit_common(uhci, urb, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) uhci_add_fsbr(uhci, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct uhci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) /* USB 1.1 interrupt transfers only involve one packet per interval.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * Drivers can submit URBs of any length, but longer ones will need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * multiple intervals to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (!qh->bandwidth_reserved) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) int exponent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* Figure out which power-of-two queue to use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) for (exponent = 7; exponent >= 0; --exponent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if ((1 << exponent) <= urb->interval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
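		/*
		 * E.g. a requested interval of 10 selects exponent 3,
		 * i.e. the 8-frame (2^3) queue: the largest power of
		 * two not exceeding the interval.
		 */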
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (exponent < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /* If the slot is full, try a lower period */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) qh->period = 1 << exponent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) qh->skel = SKEL_INDEX(exponent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* For now, interrupt phase is fixed by the layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * of the QH lists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) ret = uhci_check_bandwidth(uhci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) } while (ret != 0 && --exponent >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) } else if (qh->period > urb->interval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return -EINVAL; /* Can't decrease the period */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) ret = uhci_submit_common(uhci, urb, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) urb->interval = qh->period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (!qh->bandwidth_reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) uhci_reserve_bandwidth(uhci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * Fix up the data structures following a short transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct uhci_qh *qh, struct urb_priv *urbp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct uhci_td *td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct list_head *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) td = list_entry(urbp->td_list.prev, struct uhci_td, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /* When a control transfer is short, we have to restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * the queue at the status stage transaction, which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * the last TD. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) WARN_ON(list_empty(&urbp->td_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) qh->element = LINK_TO_TD(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) tmp = td->list.prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) ret = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /* When a bulk/interrupt transfer is short, we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * fix up the toggles of the following URBs on the queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * before restarting the queue at the next URB. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) qh->initial_toggle =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) uhci_toggle(td_token(uhci, qh->post_td)) ^ 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) uhci_fixup_toggles(uhci, qh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (list_empty(&urbp->td_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) td = qh->post_td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) qh->element = td->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) tmp = urbp->td_list.prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* Remove all the TDs we skipped over, from tmp back to the start */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) while (tmp != &urbp->td_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) td = list_entry(tmp, struct uhci_td, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) tmp = tmp->prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) uhci_remove_td_from_urbp(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) uhci_free_td(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * Common result for control, bulk, and interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct urb_priv *urbp = urb->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct uhci_qh *qh = urbp->qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct uhci_td *td, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) unsigned status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) unsigned int ctrlstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) ctrlstat = td_status(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) status = uhci_status_bits(ctrlstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (status & TD_CTRL_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) len = uhci_actual_length(ctrlstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) urb->actual_length += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) ret = uhci_map_status(status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) uhci_packetout(td_token(uhci, td)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if ((debug == 1 && ret != -EPIPE) || debug > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /* Some debugging code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) dev_dbg(&urb->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) "%s: failed with status %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (debug > 1 && errbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /* Print the chain for debugging */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) uhci_show_qh(uhci, urbp->qh, errbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ERRBUF_LEN - EXTRA_SPACE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) lprintk(errbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /* Did we receive a short packet? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) } else if (len < uhci_expected_length(td_token(uhci, td))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /* For control transfers, go to the status TD if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * this isn't already the last data TD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (td->list.next != urbp->td_list.prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /* For bulk and interrupt, this may be an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) else if (urb->transfer_flags & URB_SHORT_NOT_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) ret = -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /* Fixup needed only if this isn't the URB's last TD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) else if (&td->list != urbp->td_list.prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) uhci_remove_td_from_urbp(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (qh->post_td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) uhci_free_td(uhci, qh->post_td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) qh->post_td = td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (ret < 0) {
		/* Mark the queue as stopped and save
		 * the next toggle value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) qh->element = UHCI_PTR_TERM(uhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) qh->is_stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) (ret == -EREMOTEIO);
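		/* If the error was a short packet (-EREMOTEIO), the TD
		 * itself completed and the toggle advanced past it, so
		 * invert; for real errors the TD never completed and
		 * its toggle is reused as-is. */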
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) } else /* Short packet received */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) ret = uhci_fixup_short_transfer(uhci, qh, urbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * Isochronous transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) struct uhci_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) unsigned frame, next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) unsigned long destination, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* Values must not be too big (could overflow below) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (urb->interval >= UHCI_NUMFRAMES ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) urb->number_of_packets >= UHCI_NUMFRAMES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) uhci_get_current_frame_number(uhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) /* Check the period and figure out the starting frame number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (!qh->bandwidth_reserved) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) qh->period = urb->interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) qh->phase = -1; /* Find the best phase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) i = uhci_check_bandwidth(uhci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /* Allow a little time to allocate the TDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) next = uhci->frame_number + 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) frame = qh->phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /* Round up to the first available slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) frame += (next - frame + qh->period - 1) & -qh->period;
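		/*
		 * Worked example of the rounding above (hypothetical
		 * numbers): qh->period is a power of 2, so ANDing with
		 * -qh->period rounds down to a multiple of the period.
		 * With period == 8, phase == 2 and next == 27:
		 * (27 - 2 + 7) & -8 == 32, giving frame == 34, the
		 * first frame >= next that keeps the phase.
		 */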
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) } else if (qh->period != urb->interval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return -EINVAL; /* Can't change the period */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) next = uhci->frame_number + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /* Find the next unused frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (list_empty(&qh->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) frame = qh->iso_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) struct urb *lurb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) lurb = list_entry(qh->queue.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) struct urb_priv, node)->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) frame = lurb->start_frame +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) lurb->number_of_packets *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) lurb->interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* Fell behind? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (!uhci_frame_before_eq(next, frame)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) /* USB_ISO_ASAP: Round up to the first available slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (urb->transfer_flags & URB_ISO_ASAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) frame += (next - frame + qh->period - 1) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) -qh->period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * Not ASAP: Use the next slot in the stream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * no matter what.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) else if (!uhci_frame_before_eq(next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) frame + (urb->number_of_packets - 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) qh->period))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) urb, frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) (urb->number_of_packets - 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) qh->period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) /* Make sure we won't have to go too far into the future */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) frame + urb->number_of_packets * urb->interval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) urb->start_frame = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) for (i = 0; i < urb->number_of_packets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) td = uhci_alloc_td(uhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (!td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) uhci_add_td_to_urbp(td, urbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) uhci_fill_td(uhci, td, status, destination |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) uhci_explen(urb->iso_frame_desc[i].length),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) urb->transfer_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) urb->iso_frame_desc[i].offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /* Set the interrupt-on-completion flag on the last packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) /* Add the TDs to the frame list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) frame = urb->start_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) list_for_each_entry(td, &urbp->td_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) uhci_insert_td_in_frame_list(uhci, td, frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) frame += qh->period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
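	/*
	 * E.g. (hypothetical) a 3-packet URB with period 8 starting at
	 * frame 34 occupies frames 34, 42 and 50.
	 */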
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (list_empty(&qh->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) qh->iso_packet_desc = &urb->iso_frame_desc[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) qh->iso_frame = urb->start_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) qh->skel = SKEL_ISO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (!qh->bandwidth_reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) uhci_reserve_bandwidth(uhci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) struct uhci_td *td, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) struct urb_priv *urbp = urb->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct uhci_qh *qh = urbp->qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) unsigned int ctrlstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) int actlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) uhci_remove_tds_from_frame(uhci, qh->iso_frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) ctrlstat = td_status(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (ctrlstat & TD_CTRL_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) status = -EXDEV; /* TD was added too late? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) status = uhci_map_status(uhci_status_bits(ctrlstat),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) usb_pipeout(urb->pipe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) actlength = uhci_actual_length(ctrlstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) urb->actual_length += actlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) qh->iso_packet_desc->actual_length = actlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) qh->iso_packet_desc->status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) urb->error_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) uhci_remove_td_from_urbp(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) uhci_free_td(uhci, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) qh->iso_frame += qh->period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) ++qh->iso_packet_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static int uhci_urb_enqueue(struct usb_hcd *hcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct urb *urb, gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) struct uhci_hcd *uhci = hcd_to_uhci(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) struct urb_priv *urbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) struct uhci_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) spin_lock_irqsave(&uhci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) ret = usb_hcd_link_urb_to_ep(hcd, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) goto done_not_linked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) urbp = uhci_alloc_urb_priv(uhci, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (!urbp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (urb->ep->hcpriv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) qh = urb->ep->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (!qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) goto err_no_qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) urbp->qh = qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) switch (qh->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) case USB_ENDPOINT_XFER_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) ret = uhci_submit_control(uhci, urb, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) case USB_ENDPOINT_XFER_BULK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) ret = uhci_submit_bulk(uhci, urb, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) case USB_ENDPOINT_XFER_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) ret = uhci_submit_interrupt(uhci, urb, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) case USB_ENDPOINT_XFER_ISOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) urb->error_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) ret = uhci_submit_isochronous(uhci, urb, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) goto err_submit_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) /* Add this URB to the QH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) list_add_tail(&urbp->node, &qh->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) /* If the new URB is the first and only one on this QH then either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * the QH is new and idle or else it's unlinked and waiting to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * become idle, so we can activate it right away. But only if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * queue isn't stopped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (qh->queue.next == &urbp->node && !qh->is_stopped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) uhci_activate_qh(uhci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) uhci_urbp_wants_fsbr(uhci, urbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) err_submit_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (qh->state == QH_STATE_IDLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) err_no_qh:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) uhci_free_urb_priv(uhci, urbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) usb_hcd_unlink_urb_from_ep(hcd, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) done_not_linked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) spin_unlock_irqrestore(&uhci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) struct uhci_hcd *uhci = hcd_to_uhci(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) struct uhci_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) spin_lock_irqsave(&uhci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) rc = usb_hcd_check_unlink_urb(hcd, urb, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) qh = ((struct urb_priv *) urb->hcpriv)->qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) /* Remove Isochronous TDs from the frame list ASAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (qh->type == USB_ENDPOINT_XFER_ISOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) uhci_unlink_isochronous_tds(uhci, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /* If the URB has already started, update the QH unlink time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) uhci_get_current_frame_number(uhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) qh->unlink_frame = uhci->frame_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) uhci_unlink_qh(uhci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) spin_unlock_irqrestore(&uhci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * Finish unlinking an URB and give it back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) struct urb *urb, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) __releases(uhci->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) __acquires(uhci->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) /* Subtract off the length of the SETUP packet from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * urb->actual_length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) urb->actual_length -= min_t(u32, 8, urb->actual_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) /* When giving back the first URB in an Isochronous queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * reinitialize the QH's iso-related members for the next URB. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) urbp->node.prev == &qh->queue &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) urbp->node.next != &qh->queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) struct urb *nurb = list_entry(urbp->node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct urb_priv, node)->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) qh->iso_packet_desc = &nurb->iso_frame_desc[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) qh->iso_frame = nurb->start_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) /* Take the URB off the QH's queue. If the queue is now empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * this is a perfect time for a toggle fixup. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) list_del_init(&urbp->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (list_empty(&qh->queue) && qh->needs_fixup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) usb_pipeout(urb->pipe), qh->initial_toggle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) qh->needs_fixup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) uhci_free_urb_priv(uhci, urbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) spin_unlock(&uhci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) spin_lock(&uhci->lock);
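	/* The completion handler invoked by usb_hcd_giveback_urb() may
	 * resubmit URBs and re-enter this driver, so the lock has to be
	 * dropped around the call (as the __releases/__acquires
	 * annotations on this function advertise). */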
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /* If the queue is now empty, we can unlink the QH and give up its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * reserved bandwidth. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (list_empty(&qh->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) uhci_unlink_qh(uhci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (qh->bandwidth_reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) uhci_release_bandwidth(uhci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * Scan the URBs in a QH's queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) #define QH_FINISHED_UNLINKING(qh) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) (qh->state == QH_STATE_UNLINKING && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
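
/*
 * The "+ uhci->is_stopped" term handles a stopped controller: the
 * frame number then stops advancing, so an unlink could never be
 * seen to complete.  While stopped, is_stopped is nonzero, which
 * forces the comparison to succeed immediately.
 */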

static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (urb->unlinked) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, status);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs.  The giveback
	 * drops uhci->lock, so the queue can change underneath us; we
	 * must restart the scan from the top after each one. */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->unlinked) {

			/* Fix up the TD links and save the toggles for
			 * non-Isochronous queues.  For Isochronous queues,
			 * test for too-recent dequeues. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
			uhci_giveback_urb(uhci, qh, urb, 0);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(uhci, qh, 0);

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion before reactivating the QH. */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
		}

		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}

/*
 * Check for queues that have made some forward progress.
 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
 * has not advanced since last examined; 1 otherwise.
 *
 * Early Intel controllers have a bug that sometimes causes qh->element
 * not to advance when a TD completes successfully.  The queue then
 * remains stuck on the inactive, already-completed TD.  We detect such
 * cases and advance the element pointer by hand.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	int ret = 1;
	unsigned status;

	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		goto done;

	/* Treat an UNLINKING queue as though it hasn't advanced.
	 * This is okay because reactivation will treat it as though
	 * it has advanced, and if it is going to become IDLE then
	 * this doesn't matter anyway.  Furthermore it's possible
	 * for an UNLINKING queue not to have any URBs at all, or
	 * for its first URB not to have any TDs (if it was dequeued
	 * just as it completed).  So it's not easy in any case to
	 * test whether such queues have advanced. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(uhci, td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			goto done;
		}
		ret = uhci->is_stopped;	/* treat as advanced if stopped */
	}

	/* The queue hasn't advanced; check for timeout */
	if (qh->wait_expired)
		goto done;

	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* Detect the Intel bug and work around it */
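		/* If qh->element still points at the most recently
		 * completed TD (qh->post_td), the controller failed to
		 * follow that TD's link pointer.  Advance the QH past
		 * it by hand and report the queue as moving. */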
		if (qh->post_td && qh_element(qh) ==
				LINK_TO_TD(uhci, qh->post_td)) {
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			ret = 1;
			goto done;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR, unlink it temporarily
		 * so that we can safely set the next TD to interrupt on
		 * completion.  That way we'll know as soon as the queue
		 * starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);

	} else {
		/* Unmoving but not-yet-expired queues keep FSBR alive */
		if (urbp)
			uhci_urbp_wants_fsbr(uhci, urbp);
	}

done:
	return ret;
}

/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;
	uhci->fsbr_is_wanted = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);
	uhci->cur_iso_frame = uhci->frame_number;

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
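		/* uhci->next_qh is a field rather than a local so that
		 * code which unlinks or idles a QH during the scan can
		 * fix it up if that QH happens to be the one we would
		 * have visited next. */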
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh);
				if (qh->state == QH_STATE_ACTIVE) {
					uhci_urbp_wants_fsbr(uhci,
						list_entry(qh->queue.next,
							struct urb_priv, node));
				}
			}
		}
	}

	uhci->last_iso_frame = uhci->cur_iso_frame;
	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

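	/* If FSBR is on but no active queue asked for it during this
	 * scan, start the timer that will switch it off after a short
	 * delay. */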
	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
			!uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 1;
		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
	}

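	/* Keep the terminating TD's IOC bit armed while QHs are still
	 * waiting to finish unlinking, so we get an interrupt and come
	 * back to complete the job; otherwise leave it clear. */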
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}
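
/*
 * For reference, a minimal sketch of the single-scanner idiom used by
 * uhci_scan_schedule() above, reduced to its essentials.  This is an
 * illustration with invented names, not driver code; it assumes, as in
 * this driver, that a lock held by the caller protects both flags.
 */
struct scanner {
	unsigned int in_progress:1;
	unsigned int need_rescan:1;
};

static void scan(struct scanner *s)
{
	if (s->in_progress) {		/* someone is already scanning; */
		s->need_rescan = 1;	/* ask for one more pass */
		return;
	}
	s->in_progress = 1;
	do {
		s->need_rescan = 0;
		/* ... process events; the scan may drop and retake the
		 * caller's lock, letting a nested call set need_rescan ... */
	} while (s->need_rescan);
	s->in_progress = 0;
}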