// SPDX-License-Identifier: GPL-1.0+
/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licensed under the GPL.
 */

#include <linux/irq.h>
#include <linux/slab.h>

static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
{
	int last = urb_priv->length - 1;

	if (last >= 0) {
		int i;
		struct td *td;

		for (i = 0; i <= last; i++) {
			td = urb_priv->td [i];
			if (td)
				td_free (hc, td);
		}
	}

	list_del (&urb_priv->pending);
	kfree (urb_priv);
}

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION: ohci lock held, irqs blocked.
 */
static void
finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
__releases(ohci->lock)
__acquires(ohci->lock)
{
	struct device *dev = ohci_to_hcd(ohci)->self.controller;
	struct usb_host_endpoint *ep = urb->ep;
	struct urb_priv *urb_priv;

	// ASSERT (urb->hcpriv != 0);

 restart:
	urb_free_priv (ohci, urb->hcpriv);
	urb->hcpriv = NULL;
	if (likely(status == -EINPROGRESS))
		status = 0;

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
			if (quirk_amdiso(ohci))
				usb_amd_quirk_pll_enable();
			if (quirk_amdprefetch(ohci))
				sb800_prefetch(dev, 0);
		}
		break;
	case PIPE_INTERRUPT:
		ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
		break;
	}

	/* urb->complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
	spin_unlock (&ohci->lock);
	usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
	spin_lock (&ohci->lock);

	/* stop periodic dma if it's not needed */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}

	/*
	 * An isochronous URB that is submitted too late won't have any TDs
	 * (marked by the fact that the td_cnt value is larger than the
	 * actual number of TDs). If the next URB on this endpoint is like
	 * that, give it back now.
	 */
	if (!list_empty(&ep->urb_list)) {
		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
		urb_priv = urb->hcpriv;
		if (urb_priv->td_cnt > urb_priv->length) {
			status = 0;
			goto restart;
		}
	}
}


/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
	int i, branch = -ENOSPC;

	/* iso periods can be huge; iso tds specify frame numbers */
	if (interval > NUM_INTS)
		interval = NUM_INTS;

	/* search for the least loaded schedule branch of that period
	 * that has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval ; i++) {
		if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
			int j;

			/* usb 1.1 says 90% of one frame */
			for (j = i; j < NUM_INTS; j += interval) {
				if ((ohci->load [j] + load) > 900)
					break;
			}
			if (j < NUM_INTS)
				continue;
			branch = i;
		}
	}
	return branch;
}
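
/* A worked example of the search above (numbers invented for
 * illustration, not taken from any particular device): with
 * NUM_INTS = 32, an ED with interval 8 and load 100 usecs considers
 * branches 0..7. Choosing branch 2 would reserve 100 usecs in slots
 * 2, 10, 18 and 26, and each of those slots must stay within the
 * 900 usec budget (90% of one 1 msec frame) for the branch to
 * qualify; among qualifying branches, the least loaded one wins.
 */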

/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place. most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
	unsigned i;

	ohci_dbg(ohci, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed **prev = &ohci->periodic [i];
		__hc32 *prev_p = &ohci->hcca->int_table [i];
		struct ed *here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			wmb ();
			*prev = ed;
			*prev_p = cpu_to_hc32(ohci, ed->dma);
			wmb();
		}
		ohci->load [i] += ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
}
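
/* For a picture of what periodic_link() builds (illustrative only):
 * with NUM_INTS = 32, an interval-32 ED appears in exactly one
 * int_table slot, an interval-8 ED in four of them, and an interval-1
 * ED in all 32. Because each branch is kept sorted slow-before-fast,
 * the interval-1 EDs end up as the shared tails of every branch --
 * the tree of figure 3-5 in the OHCI spec.
 */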

/* link an ed into one of the HC chains */

static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
	int branch;

	ed->ed_prev = NULL;
	ed->ed_next = NULL;
	ed->hwNextED = 0;
	wmb ();

	/* we care about rm_list when setting CLE/BLE in case the HC was at
	 * work on some TD when CLE/BLE was turned off, and isn't quiesced
	 * yet. finish_unlinks() restarts as needed, at some upcoming INTR_SF.
	 *
	 * control and bulk EDs are doubly linked (ed_next, ed_prev), but
	 * periodic ones are singly linked (ed_next). that's because the
	 * periodic schedule encodes a tree like figure 3-5 in the ohci
	 * spec: each qh can have several "previous" nodes, and the tree
	 * doesn't have unused/idle descriptors.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		if (ohci->ed_controltail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
			ohci_writel (ohci, ed->dma,
					&ohci->regs->ed_controlhead);
		} else {
			ohci->ed_controltail->ed_next = ed;
			ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_controltail;
		if (!ohci->ed_controltail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_CLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_controltail = ed;
		break;

	case PIPE_BULK:
		if (ohci->ed_bulktail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
			ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
		} else {
			ohci->ed_bulktail->ed_next = ed;
			ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_bulktail;
		if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_BLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_bulktail = ed;
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		branch = balance (ohci, ed->interval, ed->load);
		if (branch < 0) {
			ohci_dbg (ohci,
				"ERR %d, interval %d msecs, load %d\n",
				branch, ed->interval, ed->load);
			// FIXME if there are TDs queued, fail them!
			return branch;
		}
		ed->branch = branch;
		periodic_link (ohci, ed);
	}

	/* the HC may not see the schedule updates yet, but if it does
	 * then they'll be properly ordered.
	 */

	ed->state = ED_OPER;
	return 0;
}

/*-------------------------------------------------------------------------*/

/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	int i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed *temp;
		struct ed **prev = &ohci->periodic [i];
		__hc32 *prev_p = &ohci->hcca->int_table [i];

		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ohci->load [i] -= ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;

	ohci_dbg(ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}

/* unlink an ed from one of the HC chains.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't. ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new. The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately. HC should be working on them.
 *
 *  - ED_IDLE: when there's no TD queue or the HC isn't running.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
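
/* In short, the HCD-internal state flow described above is:
 *
 *	ED_OPER -> ED_UNLINK -> ED_IDLE	(no TDs queued, or HC not running)
 *	ED_OPER -> ED_UNLINK -> ED_OPER	(requests queued; rescheduled)
 */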
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->state = ED_UNLINK;

	/* To deschedule something from the control or bulk list, just
	 * clear CLE/BLE and wait. There's no safe way to scrub out list
	 * head/current registers until later, and "later" isn't very
	 * tightly specified. Figure 6-5 and Section 6.4.2.2 show how
	 * the HC is reading the ED queues (while we modify them).
	 *
	 * For now, ed_schedule() is "later". It might be good paranoia
	 * to scrub those registers in finish_unlinks(), in case of bugs
	 * that make the HC try to use them.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_CLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// an ohci_readl() later syncs CLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_controlhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_controltail == ed) {
			ohci->ed_controltail = ed->ed_prev;
			if (ohci->ed_controltail)
				ohci->ed_controltail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	case PIPE_BULK:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_BLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// an ohci_readl() later syncs BLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_bulkhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_bulktail == ed) {
			ohci->ed_bulktail = ed->ed_prev;
			if (ohci->ed_bulktail)
				ohci->ed_bulktail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		periodic_unlink (ohci, ed);
		break;
	}
}


/*-------------------------------------------------------------------------*/

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get (
	struct ohci_hcd *ohci,
	struct usb_host_endpoint *ep,
	struct usb_device *udev,
	unsigned int pipe,
	int interval
) {
	struct ed *ed;
	unsigned long flags;

	spin_lock_irqsave (&ohci->lock, flags);

	ed = ep->hcpriv;
	if (!ed) {
		struct td *td;
		int is_out;
		u32 info;

		ed = ed_alloc (ohci, GFP_ATOMIC);
		if (!ed) {
			/* out of memory */
			goto done;
		}

		/* dummy td; end of td list for ed */
		td = td_alloc (ohci, GFP_ATOMIC);
		if (!td) {
			/* out of memory */
			ed_free (ohci, ed);
			ed = NULL;
			goto done;
		}
		ed->dummy = td;
		ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
		ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */
		ed->state = ED_IDLE;

		is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);

		/* FIXME usbcore changes dev->devnum before SET_ADDRESS
		 * succeeds ... otherwise we wouldn't need "pipe".
		 */
		info = usb_pipedevice (pipe);
		ed->type = usb_pipetype(pipe);

		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
		info |= usb_endpoint_maxp(&ep->desc) << 16;
		if (udev->speed == USB_SPEED_LOW)
			info |= ED_LOWSPEED;
		/* only control transfers store pids in tds */
		if (ed->type != PIPE_CONTROL) {
			info |= is_out ? ED_OUT : ED_IN;
			if (ed->type != PIPE_BULK) {
				/* periodic transfers... */
				if (ed->type == PIPE_ISOCHRONOUS)
					info |= ED_ISO;
				else if (interval > 32)	/* iso can be bigger */
					interval = 32;
				ed->interval = interval;
				ed->load = usb_calc_bus_time (
					udev->speed, !is_out,
					ed->type == PIPE_ISOCHRONOUS,
					usb_endpoint_maxp(&ep->desc))
						/ 1000;
			}
		}
		ed->hwINFO = cpu_to_hc32(ohci, info);

		ep->hcpriv = ed;
	}

done:
	spin_unlock_irqrestore (&ohci->lock, flags);
	return ed;
}
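
/* For reference: the hwINFO word assembled above follows the ED dword 0
 * layout from section 4.2 of the OHCI spec (bit ranges quoted from
 * memory, so verify against the spec before relying on them):
 * bits 0-6 function address, 7-10 endpoint number, 11-12 direction
 * (control EDs leave this 00 so the PID comes from each TD), 13 speed,
 * 14 skip, 15 format (ISO), 16-26 max packet size.
 */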

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
	ed_deschedule (ohci, ed);

	/* rm_list is just singly linked, for simplicity */
	ed->ed_next = ohci->ed_rm_list;
	ed->ed_prev = NULL;
	ohci->ed_rm_list = ed;

	/* enable SOF interrupt */
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
	// flush those writes, and get latest HCCA contents
	(void) ohci_readl (ohci, &ohci->regs->control);

	/* SF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave. frame_no wraps every 2^16 msec, and changes right before
	 * SF is triggered.
	 */
	ed->tick = ohci_frame_no(ohci) + 1;

}
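
/* Putting the pieces above together: ed_deschedule() hides the ED from
 * the HC, the ED parks on ed_rm_list, the SF interrupt fires at the
 * next frame boundary, and finish_unlinks() then uses ed->tick to make
 * sure the frame counter has moved on before it recycles EDs the HC
 * might still have been touching.
 */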

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

static void
td_fill (struct ohci_hcd *ohci, u32 info,
	dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td *td, *td_pt;
	struct urb_priv *urb_priv = urb->hcpriv;
	int is_iso = info & TD_ISO;
	int hash;

	// ASSERT (index < urb_priv->length);

	/* aim for only one interrupt per urb. mostly applies to control
	 * and iso; other urbs rarely need more than one TD per urb.
	 * this way, only final tds (or ones with an error) cause IRQs.
	 * at least immediately; use DI=6 in case any control request is
	 * tempted to die part way through. (and to force the hc to flush
	 * its donelist soonish, even on unlink paths.)
	 *
	 * NOTE: could delay interrupts even for the last TD, and get fewer
	 * interrupts ... increasing per-urb latency by sharing interrupts.
	 * Drivers that queue bulk urbs may request that behavior.
	 */
	if (index != (urb_priv->length - 1)
			|| (urb->transfer_flags & URB_NO_INTERRUPT))
		info |= TD_DI_SET (6);

	/* use this td as the next dummy */
	td_pt = urb_priv->td [index];

	/* fill the old dummy TD */
	td = urb_priv->td [index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;

	td->hwINFO = cpu_to_hc32 (ohci, info);
	if (is_iso) {
		td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
		*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
						(data & 0x0FFF) | 0xE000);
	} else {
		td->hwCBP = cpu_to_hc32 (ohci, data);
	}
	if (data)
		td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
	else
		td->hwBE = 0;
	td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);

	/* append to queue */
	list_add_tail (&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC (td->td_dma);
	td->td_hash = ohci->td_hash [hash];
	ohci->td_hash [hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb ();
	td->ed->hwTailP = td->hwNextTD;
}
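
/* The dummy-TD swap above is what makes it safe to queue TDs while the
 * HC is running: the ED's tail always points at an empty dummy, so the
 * HC never chases a half-written TD. td_fill() fills in the old dummy
 * and only after the wmb() advances hwTailP -- publishing the new
 * transfer atomically as far as the controller is concerned -- while
 * td_pt becomes the next dummy.
 */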

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
	struct ohci_hcd *ohci,
	struct urb *urb
) {
	struct urb_priv *urb_priv = urb->hcpriv;
	struct device *dev = ohci_to_hcd(ohci)->self.controller;
	dma_addr_t data;
	int data_len = urb->transfer_buffer_length;
	int cnt = 0;
	u32 info = 0;
	int is_out = usb_pipeout (urb->pipe);
	int periodic = 0;
	int i, this_sg_len, n;
	struct scatterlist *sg;

	/* OHCI handles the bulk/interrupt data toggles itself. We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */
	if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
		usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
			is_out, 1);
		urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
	}

	list_add (&urb_priv->pending, &ohci->pending);

	i = urb->num_mapped_sgs;
	if (data_len > 0 && i > 0) {
		sg = urb->sg;
		data = sg_dma_address(sg);

		/*
		 * urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), data_len);
	} else {
		sg = NULL;
		if (data_len)
			data = urb->transfer_dma;
		else
			data = 0;
		this_sg_len = data_len;
	}

	/* NOTE: TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {

	/* Bulk and interrupt are identical except for where in the schedule
	 * their EDs live.
	 */
	case PIPE_INTERRUPT:
		/* ... and periodic urbs have extra accounting */
		periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
		fallthrough;
	case PIPE_BULK:
		info = is_out
			? TD_T_TOGGLE | TD_CC | TD_DP_OUT
			: TD_T_TOGGLE | TD_CC | TD_DP_IN;
		/* TDs _could_ transfer up to 8K each */
		for (;;) {
			n = min(this_sg_len, 4096);

			/* maybe avoid ED halt on final TD short read */
			if (n >= data_len || (i == 1 && n >= this_sg_len)) {
				if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
					info |= TD_R;
			}
			td_fill(ohci, info, data, n, urb, cnt);
			this_sg_len -= n;
			data_len -= n;
			data += n;
			cnt++;

			if (this_sg_len <= 0) {
				if (--i <= 0 || data_len <= 0)
					break;
				sg = sg_next(sg);
				data = sg_dma_address(sg);
				this_sg_len = min_t(int, sg_dma_len(sg),
						data_len);
			}
		}
		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& cnt < urb_priv->length) {
			td_fill (ohci, info, 0, 0, urb, cnt);
			cnt++;
		}
		/* maybe kickstart bulk list */
		if (urb_priv->ed->type == PIPE_BULK) {
			wmb ();
			ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
		td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
		if (data_len > 0) {
			info = TD_CC | TD_R | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE: mishandles transfers >8K, some >4K */
			td_fill (ohci, info, data, data_len, urb, cnt++);
		}
		info = (is_out || data_len == 0)
			? TD_CC | TD_DP_IN | TD_T_DATA1
			: TD_CC | TD_DP_OUT | TD_T_DATA1;
		td_fill (ohci, info, data, 0, urb, cnt++);
		/* maybe kickstart control list */
		wmb ();
		ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
		break;

	/* ISO has no retransmit, so no toggle; and it uses special TDs.
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		for (cnt = urb_priv->td_cnt; cnt < urb->number_of_packets;
				cnt++) {
			int frame = urb->start_frame;

			// FIXME scheduling should handle frame counter
			// roll-around ... exotic case (and OHCI has
			// a 2^16 iso range, vs other HCs max of 2^10)
			frame += cnt * urb->interval;
			frame &= 0xffff;
			td_fill (ohci, TD_CC | TD_ISO | frame,
				data + urb->iso_frame_desc [cnt].offset,
				urb->iso_frame_desc [cnt].length, urb, cnt);
		}
		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
			if (quirk_amdiso(ohci))
				usb_amd_quirk_pll_disable();
			if (quirk_amdprefetch(ohci))
				sb800_prefetch(dev, 1);
		}
		periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
		break;
	}

	/* start periodic dma if needed */
	if (periodic) {
		wmb ();
		ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}

	// ASSERT (urb_priv->length == cnt);
}
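
/* As an example of what the control case above queues for a typical IN
 * request with a data stage (say, a GET_DESCRIPTOR): a SETUP TD
 * (8 bytes, forced DATA0), a DATA IN TD (DATA1, with TD_R set so a
 * short read won't halt the ED), and a zero-length OUT status TD
 * (DATA1), followed by one OHCI_CLF write telling the HC the control
 * list has work.
 */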
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) /*-------------------------------------------------------------------------*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * Done List handling functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) *-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) /* calculate transfer length/status and update the urb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) int cc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) int status = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) list_del (&td->td_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /* ISO ... drivers see per-TD length/status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (tdINFO & TD_ISO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) u16 tdPSW = ohci_hwPSW(ohci, td, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) int dlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) /* NOTE: assumes FC in tdINFO == 0, and that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * only the first of 0..MAXPSW psws is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
		cc = (tdPSW >> 12) & 0xF;
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return status;

		if (usb_pipeout (urb->pipe))
			dlen = urb->iso_frame_desc [td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}
		urb->actual_length += dlen;
		urb->iso_frame_desc [td->index].actual_length = dlen;
		urb->iso_frame_desc [td->index].status = cc_to_error [cc];

		if (cc != TD_CC_NOERROR)
			ohci_dbg(ohci,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		int	type = usb_pipetype (urb->pipe);
		u32	tdBE = hc32_to_cpup (ohci, &td->hwBE);

		cc = TD_CC_GET (tdINFO);

		/* update packet status if needed (short is normally ok) */
		if (cc == TD_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;
		if (cc != TD_CC_NOERROR && cc < 0x0E)
			status = cc_to_error[cc];

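		/* hwCBP is the current buffer pointer: the HC zeroes it
		 * once the whole buffer has been transferred, otherwise
		 * it points at the next byte to transfer.  hwBE is the
		 * address of the last byte in the buffer.
		 */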
		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
			if (td->hwCBP == 0)
				urb->actual_length += tdBE - td->data_dma + 1;
			else
				urb->actual_length +=
					  hc32_to_cpup (ohci, &td->hwCBP)
					- td->data_dma;
		}

		if (cc != TD_CC_NOERROR && cc < 0x0E)
			ohci_dbg(ohci,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, 1 + td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}
	return status;
}

/*-------------------------------------------------------------------------*/

static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
{
	struct urb		*urb = td->urb;
	urb_priv_t		*urb_priv = urb->hcpriv;
	struct ed		*ed = td->ed;
	struct list_head	*tmp = td->td_list.next;
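	/* save the data-toggle carry; it is reapplied each time hwHeadP
	 * is patched below */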
	__hc32			toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);

	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);

	/* Get rid of all later tds from this urb.  We don't have
	 * to be careful: no errors and nothing was transferred.
	 * Also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td	*next;

		next = list_entry (tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */

		list_del(&next->td_list);
		urb_priv->td_cnt++;
		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting: report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		fallthrough;
	case TD_CC_STALL:
		if (usb_pipecontrol (urb->pipe))
			break;
		fallthrough;
	default:
		ohci_dbg (ohci,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			hc32_to_cpu (ohci, td->hwINFO),
			cc, cc_to_error [cc]);
	}
}

/* Add a TD to the done list */
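/*
 * The hardware done queue is singly linked, newest first.  To give
 * TDs back in the order they were queued on the ED, walk backwards
 * from td and first list any earlier TDs of this ED that have not
 * been listed yet.
 */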
static void add_to_done_list(struct ohci_hcd *ohci, struct td *td)
{
	struct td	*td2, *td_prev;
	struct ed	*ed;

	if (td->next_dl_td)
		return;		/* Already on the list */

	/* Add all the TDs going back until we reach one that's on the list */
	ed = td->ed;
	td2 = td_prev = td;
	list_for_each_entry_continue_reverse(td2, &ed->td_list, td_list) {
		if (td2->next_dl_td)
			break;
		td2->next_dl_td = td_prev;
		td_prev = td2;
	}

	if (ohci->dl_end)
		ohci->dl_end->next_dl_td = td_prev;
	else
		ohci->dl_start = td_prev;

	/*
	 * Make td->next_dl_td point to td itself, to mark the fact
	 * that td is on the done list.
	 */
	ohci->dl_end = td->next_dl_td = td;

	/* Did we just add the latest pending TD? */
	td2 = ed->pending_td;
	if (td2 && td2->next_dl_td)
		ed->pending_td = NULL;
}

/* Get the entries on the hardware done queue and put them on our list */
static void update_done_list(struct ohci_hcd *ohci)
{
	u32		td_dma;
	struct td	*td = NULL;

	td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
	ohci->hcca->done_head = 0;
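	/* make the cleared done_head visible before WDH is acknowledged
	 * in the interrupt handler, so the HC can post a new list */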
	wmb();

	/* get TD from hc's singly linked list, and
	 * add to ours.  ed->td_list changes later.
	 */
	while (td_dma) {
		int		cc;

		td = dma_to_td (ohci, td_dma);
		if (!td) {
			ohci_err (ohci, "bad entry %8x\n", td_dma);
			break;
		}

		td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
		cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));

		/* Non-iso endpoints can halt on error; un-halt,
		 * and dequeue any other TDs from this urb.
		 * No other TD could have caused the halt.
		 */
		if (cc != TD_CC_NOERROR
				&& (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
			ed_halted(ohci, td, cc);

		td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
		add_to_done_list(ohci, td);
	}
}

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void finish_unlinks(struct ohci_hcd *ohci)
{
	unsigned	tick = ohci_frame_no(ohci);
	struct ed	*ed, **last;

rescan_all:
	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head	*entry, *tmp;
		int			completed, modified;
		__hc32			*prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely(ohci->rh_state == OHCI_RH_RUNNING) &&
				tick_before(tick, ed->tick)) {
skip_ed:
			last = &ed->ed_next;
			continue;
		}
		if (!list_empty(&ed->td_list)) {
			struct td	*td;
			u32		head;

			td = list_first_entry(&ed->td_list, struct td,
					td_list);

			/* INTR_WDH may need to clean up first */
			head = hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK;
			if (td->td_dma != head &&
					ohci->rh_state == OHCI_RH_RUNNING)
				goto skip_ed;

			/* Don't mess up anything already on the done list */
			if (td->next_dl_td)
				goto skip_ed;
		}

		/* The ED is now officially unlinked; the HC no longer
		 * sees it */
		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
		ed->hwNextED = 0;
		wmb();
		ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);

		/* reentrancy: if we drop the schedule lock, someone might
		 * have modified this list.  normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_next;
		ed->ed_next = NULL;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed.  But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
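		/* prev tracks the hw link (the ED head or a TD's hwNextTD)
		 * that points at the current TD, so unlinked TDs can be
		 * spliced out of the chain the HC follows */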
		prev = &ed->hwHeadP;
		list_for_each_safe (entry, tmp, &ed->td_list) {
			struct td	*td;
			struct urb	*urb;
			urb_priv_t	*urb_priv;
			__hc32		savebits;
			u32		tdINFO;

			td = list_entry (entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			if (!urb->unlinked) {
				prev = &td->hwNextTD;
				continue;
			}

			/* patch the pointer the hc uses; keep the low
			 * control bits of *prev intact */
			savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* If this was unlinked, the TD may not have been
			 * retired ... so manually save the data toggle.
			 * The controller ignores the value we save for
			 * control and ISO endpoints.
			 */
			tdINFO = hc32_to_cpup(ohci, &td->hwINFO);
			if ((tdINFO & TD_T) == TD_T_DATA0)
				ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_C);
			else if ((tdINFO & TD_T) == TD_T_DATA1)
				ed->hwHeadP |= cpu_to_hc32(ohci, ED_C);

			/* HC may have partly processed this TD */
			td_done (ohci, urb, td);
			urb_priv->td_cnt++;

			/* if URB is done, clean up */
			if (urb_priv->td_cnt >= urb_priv->length) {
				modified = completed = 1;
				finish_urb(ohci, urb, 0);
			}
		}
		if (completed && !list_empty (&ed->td_list))
			goto rescan_this;

		/*
		 * If no TDs are queued, ED is now idle.
		 * Otherwise, if the HC is running, reschedule.
		 * If the HC isn't running, add ED back to the
		 * start of the list for later processing.
		 */
		if (list_empty(&ed->td_list)) {
			ed->state = ED_IDLE;
			list_del(&ed->in_use_list);
		} else if (ohci->rh_state == OHCI_RH_RUNNING) {
			ed_schedule(ohci, ed);
		} else {
			ed->ed_next = ohci->ed_rm_list;
			ohci->ed_rm_list = ed;
			/* Don't loop on the same ED */
			if (last == &ohci->ed_rm_list)
				last = &ed->ed_next;
		}

		if (modified)
			goto rescan_all;
	}

	/* maybe reenable control and bulk lists */
	if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
		u32	command = 0, control = 0;

		if (ohci->ed_controltail) {
			command |= OHCI_CLF;
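			/* the ZF Micro chip seems to need a short settle
			 * delay before these register writes */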
			if (quirk_zfmicro(ohci))
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
				control |= OHCI_CTRL_CLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_controlcurrent);
			}
		}
		if (ohci->ed_bulktail) {
			command |= OHCI_BLF;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
				control |= OHCI_CTRL_BLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_bulkcurrent);
			}
		}

		/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
		if (control) {
			ohci->hc_control |= control;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		if (command) {
			if (quirk_zfmicro(ohci))
				mdelay(1);
			ohci_writel (ohci, command, &ohci->regs->cmdstatus);
		}
	}
}

/*-------------------------------------------------------------------------*/

/* Take back a TD from the host controller */
static void takeback_td(struct ohci_hcd *ohci, struct td *td)
{
	struct urb	*urb = td->urb;
	urb_priv_t	*urb_priv = urb->hcpriv;
	struct ed	*ed = td->ed;
	int		status;

	/* update URB's length and status from TD */
	status = td_done(ohci, urb, td);
	urb_priv->td_cnt++;

	/* If all this urb's TDs are done, call complete() */
	if (urb_priv->td_cnt >= urb_priv->length)
		finish_urb(ohci, urb, status);

	/* clean schedule: unlink EDs that are no longer busy */
	if (list_empty(&ed->td_list)) {
		if (ed->state == ED_OPER)
			start_ed_unlink(ohci, ed);

	/* ... reenabling halted EDs only after fault cleanup */
	} else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
			== cpu_to_hc32(ohci, ED_SKIP)) {
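		/* ED_SKIP set without ED_DEQUEUE means ed_halted() parked
		 * this ED after an error; restart it once its first TD is
		 * no longer a retired one awaiting giveback */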
		td = list_entry(ed->td_list.next, struct td, td_list);
		if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
			ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
			/* ... hc may need waking-up */
			switch (ed->type) {
			case PIPE_CONTROL:
				ohci_writel(ohci, OHCI_CLF,
						&ohci->regs->cmdstatus);
				break;
			case PIPE_BULK:
				ohci_writel(ohci, OHCI_BLF,
						&ohci->regs->cmdstatus);
				break;
			}
		}
	}
}

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
 * instead of scanning the (re-reversed) donelist as this does.
 */
static void process_done_list(struct ohci_hcd *ohci)
{
	struct td	*td;

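	/* advance dl_start before the takeback: giveback drops the lock,
	 * so update_done_list() may grow the list underneath us */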
	while (ohci->dl_start) {
		td = ohci->dl_start;
		if (td == ohci->dl_end)
			ohci->dl_start = ohci->dl_end = NULL;
		else
			ohci->dl_start = td->next_dl_td;

		takeback_td(ohci, td);
	}
}

/*
 * TD takeback and URB giveback must be single-threaded.
 * This routine takes care of it all.
 */
static void ohci_work(struct ohci_hcd *ohci)
{
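	/* already running?  (reentered via a completion or the IRQ
	 * handler)  Ask the outer invocation to loop once more. */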
	if (ohci->working) {
		ohci->restart_work = 1;
		return;
	}
	ohci->working = 1;

restart:
	process_done_list(ohci);
	if (ohci->ed_rm_list)
		finish_unlinks(ohci);

	if (ohci->restart_work) {
		ohci->restart_work = 0;
		goto restart;
	}
	ohci->working = 0;
}